query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
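The fields above describe triplet-style retrieval records: a natural-language query, one positive code document, and 30 negative documents with similarity scores, as declared by the "triplet" objective in each row's metadata. As a minimal sketch of how such rows could be consumed, assuming they are exported as JSON Lines with exactly these field names (the file name rows.jsonl is illustrative, not part of the dataset):

import json

def load_triplets(path="rows.jsonl"):
    """Yield (query, positive document, scored negatives) from a JSON Lines export."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            # Pair each negative snippet with its score; scores are stored as strings in the rows.
            negatives = list(zip(row["negatives"],
                                 (float(s) for s in row["negative_scores"])))
            yield row["query"], row["document"], negatives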
Combine existing chunks to recreate the file. The chunks must be present in the cwd. The new file will be written to cwd.
def combine(self):
    import re
    print 'Creating file', self.__filename
    bname = (os.path.split(self.__filename))[1]
    bname2 = bname
    # bugfix: if file contains characters like +,.,[]
    # properly escape them, otherwise re will fail to match.
    for a, b in zip(['+', '.', '[', ']', '$', '(', ')'],
                    ['\+', '\.', '\[', '\]', '\$', '\(', '\)']):
        bname2 = bname2.replace(a, b)
    chunkre = re.compile(bname2 + '-' + '[0-9]+')
    chunkfiles = []
    for f in os.listdir("."):
        print f
        if chunkre.match(f):
            chunkfiles.append(f)
    print 'Number of chunks', len(chunkfiles), '\n'
    chunkfiles.sort(self.sort_index)
    data = ''
    for f in chunkfiles:
        try:
            print 'Appending chunk', os.path.join(".", f)
            data += open(f, 'rb').read()
        except (OSError, IOError, EOFError), e:
            print e
            continue
    try:
        f = open(bname, 'wb')
        f.write(data)
        f.close()
    except (OSError, IOError, EOFError), e:
        raise FileSplitterException, str(e)
    print 'Wrote file', bname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_vcf_chunks(out_dir, path_name, path_size, chunks, overwrite):\n vcf_path = os.path.join(out_dir, path_name + \".vcf\")\n if overwrite or not os.path.isfile(vcf_path):\n first = True\n for chunk_i, chunk in enumerate(chunks):\n clip_path = chunk_base_name(path_name, out_dir, chunk_i, \"_clip.vcf\")\n if os.path.isfile(clip_path):\n if first is True:\n # copy everything including the header\n run(\"cat {} > {}\".format(clip_path, vcf_path))\n first = False\n else:\n # add on everythin but header\n run(\"grep -v \\\"^#\\\" {} >> {}\".format(clip_path, vcf_path), check=False)\n \n # add a compressed indexed version\n if overwrite or not os.path.isfile(vcf_path + \".gz\"):\n run(\"bgzip -c {} > {}\".format(vcf_path, vcf_path + \".gz\"))\n run(\"tabix -f -p vcf {}\".format(vcf_path + \".gz\"))", "def join_chunks(self):\n if self.state == self.STATE_UPLOADING and self.total_chunks_uploaded == self.total_chunks:\n\n # create file and write chunks in the right order\n temp_file = open(self.full_path, \"wb\")\n for chunk in self.chunks.all():\n chunk_bytes = chunk.file.read()\n temp_file.write(chunk_bytes)\n temp_file.close()\n\n # set state as completed\n self.state = self.STATE_COMPLETED\n super(FlowFile, self).save()\n\n # delete chunks automatically if is activated in settings\n if FLOWJS_AUTO_DELETE_CHUNKS:\n self.chunks.all().delete()", "def write_chunks(file, chunks):\n\n\tfor c in chunks:\n\n\t\tchunk(file, c[0], c[1])", "def _concatenate_inner(self, chunks, direction):\n tmp_bucket = []\n source_chunks = chunks if direction else chunks[::-1]\n target_chunks = ChunkList()\n for chunk in source_chunks:\n if (\n # if the chunk has matched dependency, do concatenation.\n chunk.dependency == direction or\n # if the chunk is SPACE, concatenate to the previous chunk.\n (direction == False and chunk.is_space())\n ):\n tmp_bucket.append(chunk)\n continue\n tmp_bucket.append(chunk)\n if not direction: tmp_bucket = tmp_bucket[::-1]\n new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])\n chunk.update_word(new_word)\n target_chunks.append(chunk)\n tmp_bucket = []\n if tmp_bucket: target_chunks += tmp_bucket\n return target_chunks if direction else target_chunks[::-1]", "def _concatenate_parts_to_file_for_pipe(self,\n outfile,\n image_parts,\n source_dir,\n debug=False):\n close_all_fds([outfile])\n part_count = len(image_parts)\n part_file = None\n try:\n for part in image_parts:\n self.log.debug(\"Concatenating Part:\" + str(part.filename))\n sha1sum = hashlib.sha1()\n part_file_path = source_dir + \"/\" + part.filename\n with open(part_file_path) as part_file:\n data = part_file.read(euca2ools.bundle.pipes._BUFSIZE)\n while data:\n sha1sum.update(data)\n outfile.write(data)\n outfile.flush()\n data = part_file.read(euca2ools.bundle.pipes._BUFSIZE)\n part_digest = sha1sum.hexdigest()\n self.log.debug(\n \"PART NUMBER:\" + str(image_parts.index(part) + 1) +\n \"/\" + str(part_count))\n self.log.debug('Part sha1sum:' + str(part_digest))\n self.log.debug('Expected sum:' + str(part.hexdigest))\n if part_digest != part.hexdigest:\n raise ValueError('Input part file may be corrupt:{0} '\n .format(part.filename),\n '(expected digest: {0}, actual: {1})'\n .format(part.hexdigest, part_digest))\n except IOError as ioe:\n # HACK\n self.log.debug('Error in _concatenate_parts_to_file_for_pipe.' 
+\n str(ioe))\n if not debug:\n return\n raise ioe\n finally:\n if part_file:\n part_file.close()\n self.log.debug('Concatentate done')\n self.log.debug('Closing write end of pipe after writing')\n outfile.close()", "def _copy(self):\n for d in self._current_chunk:\n self.out.write(d)", "def merge_chunks(self, data):\r\n fn = \"speech_%s_%s.mp3\" % (\r\n data[\"lang\"], data[\"datetime\"].strftime(\"%Y%m%d-%H%M%S\"))\r\n filename_main = unique_path(fn)\r\n with open(filename_main, \"wb\") as f:\r\n # MP3s can be simply concatenated together, result is legible.\r\n for i, filename in enumerate(data[\"filenames\"]):\r\n f.write(open(filename, \"rb\").read())\r\n # Add more silence for separators like commas and periods.\r\n silence_count = 0\r\n if data[\"chunks\"][i][-1] in [\".\",\"?\",\"!\"]:\r\n silence_count = conf.SilenceCountLong\r\n elif data[\"chunks\"][i][-1] in [\",\",\":\",\";\",\"(\",\")\"]:\r\n silence_count = conf.SilenceCountShort\r\n f.write(base64.decodestring(conf.Silence) * silence_count)\r\n for filename in data[\"filenames\"]:\r\n try:\r\n os.unlink(filename)\r\n except Exception: pass\r\n data.update(filenames=[filename_main], current=filename_main, count=1)", "def packFiles(source, filesPerBlock, dest):\n\tfileCount = 1\n\t\n\ttmpFileName = \"tmp.h5\"\t\n\n\n\toutFile = createBlockFile(tmpFileName)\t\n\tfor dirname, subdirs, files in os.walk(source):\t\n\t print 'Scanning ' + dirname + '...'\t\n\t for f in files:\t\n\t if f.endswith('.h5'):\t\n\t inFile = h5py.File(os.path.join(dirname, f), 'r')\t\n\t outFile.copy(inFile, outFile['songs'], f)\t\n\t inFile.close()\t\n\t fileCount = fileCount + 1\t\n\t if(fileCount > filesPerBlock):\t\n\t outFile.close()\t\n\t upload(tmpFileName, bucket)\t\n\t fileCount = 1\t\n\t outFile = createBlockFile(tmpFileName)\t\n\n \toutFile.close()\n \tif fileCount > 1:\n\t \tupload(tmpFileName, bucket)\n\n\tos.remove(tmpFileName)", "def hadd(new_name, files, chunk_size=900):\n \n if len(files) <= chunk_size:\n return hadd_ex(new_name, files)\n \n files = files[:]\n new_files = []\n while files:\n these = files[:chunk_size]\n files = files[chunk_size:]\n\n this_fn = new_name + '_%i' % len(new_files)\n new_files.append(this_fn)\n\n if not hadd_ex(this_fn, these):\n print '\\033[36;7m PROBLEM hadding \\033[m', new_name, 'in chunks of', chunk_size, 'on', this_fn\n return False\n\n assert len(new_files) < chunk_size\n\n ok = hadd_ex(new_name, new_files)\n if not ok:\n print '\\033[36;7m PROBLEM hadding', new_name, 'in chunks of', chunk_size, 'assembling final file'\n return False\n\n for fn in new_files:\n os.remove(fn)\n\n return True", "def combine_chunks(chunks, output_path, decompress=False, encrypt_key=None):\n\n msg = 'combining %s chunks' % len(chunks)\n logger.info(msg)\n\n salt_length = 0\n key_length = 0\n if encrypt_key:\n if encrypt_key.binary_salt:\n salt_length = len(encrypt_key.binary_salt)\n assert salt_length == 16, salt_length\n key_length = len(encrypt_key.binary_key)\n assert key_length == 16, key_length\n f = open(output_path, 'wb')\n if decompress:\n decompressor = zlib.decompressobj()\n if encrypt_key:\n # salt, then iv\n iv = chunks[0].read(byte_range=[salt_length, salt_length + key_length])\n decryptor = Cipher(\n CIPHER_MODE, encrypt_key.binary_key, iv, CIPHER_DECODE\n )\n # sort out any parity chunks\n parity_chunks = []\n while chunks[-1].parity:\n parity_chunks.insert(0, chunks.pop())\n if parity_chunks and has_par2():\n cwd1 = os.getcwd()\n os.chdir(os.path.dirname(parity_chunks[0].file_path))\n # The files have to 
end in .par2 to work.\n for parity_chunk in parity_chunks:\n new_name = '%s.%s' % (parity_chunk.filename, 'par2')\n os.rename(parity_chunk.filename, new_name)\n parity_chunk.file_path = os.path.join(\n os.path.dirname(parity_chunk.file_path), new_name)\n # It won't recognize them by name, so put them on the command line.\n par2_repair([p.filename for p in parity_chunks])\n os.chdir(cwd1)\n for (i, chunk) in enumerate(chunks):\n chunk.verify_checksum()\n if i == 0 and encrypt_key:\n chunk_size = chunk.size\n if encrypt_key.binary_salt:\n # strip salt and IV\n data = chunk.read(\n byte_range=[len(encrypt_key.binary_key) + \\\n len(encrypt_key.binary_salt),\n chunk.size-4]\n )\n else:\n # skip the IV\n data = chunk.read(\n byte_range=[len(encrypt_key.binary_key), chunk.size-4]\n )\n else:\n data = chunk.read()\n if encrypt_key:\n data = decryptor.update(data)\n assert not decryptor.final()\n if decompress:\n data = decompressor.decompress(data)\n assert not decompressor.unused_data\n f.write(data)\n f.close()\n logger.debug('file size is %s' % os.stat(output_path).st_size)", "def merge_files():\n # abs path of data folder\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\\\\KORD\")\n file_list = os.listdir(work_folder)\n with open(os.path.join(work_folder, \"..\\\\merged_history_KORD.csv\"), \"w\") as outfile:\n for line in open(os.path.join(work_folder, file_list[0])):\n outfile.write(line)\n print \"write the first line\"\n for i in range(1, len(file_list)):\n with open(os.path.join(work_folder, file_list[i])) as infile:\n infile.next()\n for line in infile:\n outfile.write(line)", "def mergeAndSaveFile(dumpMetaFile, chunkSizeFile, outFile):\n dump = open (dumpMetaFile, \"r\")\n chunk = open (chunkSizeFile, \"r\")\n out = open (outFile, \"w\")\n \n cline = \"\"\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n\n while dump:\n dline = dump.readline()\n if not dline:\n break\n dline = dline.rstrip(\"\\n\")\n \n # Split line parts \n dlineParts = dline.split(' ')\n \n # Read lines from chunkSize\n numEntries = int(dlineParts[2])\n \n entries = []\n for i in range(numEntries):\n entries.append([dlineParts[i*3 + 3], dlineParts[i*3 + 4], dlineParts[i*3 + 5], 0])\n #entries[i][0] = dlineParts[i*3 + 3]\n #entries[i][1] = dlineParts[i*3 + 4]\n #entries[i][2] = dlineParts[i*3 + 5]\n #entries[i][3] = 0\n\n while True:\n clineParts = cline.split(' ')\n if ((dlineParts[0] == clineParts[0]) and (dlineParts[1] == clineParts[1])):\n for i in range(numEntries):\n if ((entries[i][0] == clineParts[3]) and (entries[i][1] == clineParts[4])):\n entries[i][3] = clineParts[2]\n else:\n break\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n if not cline:\n break\n\n # Print output\n out.write(dlineParts[0]+\" \"+dlineParts[1]+\" \"+dlineParts[2]+\" \")\n for i in range(numEntries):\n out.write(str(entries[i][3])+\" \"+entries[i][0]+\" \"+entries[i][1]+\" \"+entries[i][2]+\" \")\n out.write(\"\\n\")\n out.close()", "def _chunks_merge(chunks):\n chunks_ = []\n while chunks:\n chunk, chunks = chunks\n chunks_.append(chunk)\n return chunks_[0][:0].join(reversed(chunks_)) if chunks_ else b\"\"", "def merge_root_files(self, force=False):\n self.OutFilePath.parent.mkdir(exist_ok=True)\n cmd = f'hadd{\" -f\" if force else \"\"} {self.proteus_raw_file_path()} {self.Raw.OutFilePath} {self.Ref.OutFilePath} {self.Adc2Vcal.OutFilePath}'\n pinfo(cmd)\n check_call(cmd, shell=True)", "def __concatonate_files(self, new_file_name, parent_folder):\n\n # make the output directory\n 
output_file = self.save_directory + \"/\" + new_file_name\n\n # check if save_directory exists\n if not os.path.exists(self.save_directory):\n try:\n # make the directory\n os.makedirs(self.save_directory)\n except PermissionError:\n # if the user is unable to write to this directory, we should not continue\n print(\"You do not have the correct permissions for creating a directory here. Please try again.\")\n exit(-1)\n\n barcode_files = []\n for root, directory, files in os.walk(parent_folder):\n # we need to know where each file is in the barcode folder so we can read data from it\n for name in files:\n barcode_files.append( os.path.join(root, name) )\n\n with open(output_file, 'w') as writer:\n for name in barcode_files:\n with open(name, 'r') as reader:\n for line in reader:\n writer.write(line)", "def file_sync_write_chunks(radosobject, chunksize, offset, chunks, size=None):\n padding = 0\n cursize = chunksize * offset\n radosobject.seek(cursize)\n for chunk in chunks:\n if padding:\n radosobject.sync_write(buffer(zeros(chunksize), 0, padding))\n if size is not None and cursize + chunksize >= size:\n chunk = chunk[:chunksize - (cursize - size)]\n radosobject.sync_write(chunk)\n cursize += len(chunk)\n break\n radosobject.sync_write(chunk)\n padding = chunksize - len(chunk)\n\n padding = size - cursize if size is not None else 0\n if padding <= 0:\n return\n\n q, r = divmod(padding, chunksize)\n for x in xrange(q):\n radosobject.sunc_write(zeros(chunksize))\n radosobject.sync_write(buffer(zeros(chunksize), 0, r))", "def append(self, file, idx):\n\n # print \"append %s %d\" % (file, idx)\n src = \"%s/%s\" % (self._dir, file)\n dst = \"%s/.%d.new\" % (self._tempdir, idx)\n copyfile(src, dst)\n result = self._run(\"%s --%d --block-size %d --bits %d --quiet --threads %d %s --mode %s --rehash %s %s\" %\n (self._ishakesumd, self._mode, self._block_size, self._output_bits, self._threads,\n self._profile, self._alg, self._hash, self._tempdir))\n os.remove(dst)\n return result", "def create_initial_file():\n\n merge_file = tempfile.NamedTemporaryFile()\n\n # spin the sources for the base file\n for source in sort_sources(\n recursive_glob(settings[\"datapath\"], settings[\"hostfilename\"])\n ):\n\n start = \"# Start {}\\n\\n\".format(os.path.basename(os.path.dirname(source)))\n end = \"\\n# End {}\\n\\n\".format(os.path.basename(os.path.dirname(source)))\n\n with open(source, \"r\", encoding=\"UTF-8\") as curFile:\n write_data(merge_file, start + curFile.read() + end)\n\n # spin the sources for extensions to the base file\n for source in settings[\"extensions\"]:\n for filename in sort_sources(\n recursive_glob(\n path_join_robust(settings[\"extensionspath\"], source),\n settings[\"hostfilename\"],\n )\n ):\n with open(filename, \"r\") as curFile:\n write_data(merge_file, curFile.read())\n\n maybe_copy_example_file(settings[\"blacklistfile\"])\n\n if os.path.isfile(settings[\"blacklistfile\"]):\n with open(settings[\"blacklistfile\"], \"r\") as curFile:\n write_data(merge_file, curFile.read())\n\n return merge_file", "def concat_chunks(file_list: list, output_path: str, verbose_level=0) -> str:\n temp_file_name = 'temp_' + str(len(file_list)) + \\\n str(int(round(time.time() * 1000))) + '.wav'\n files_str = ' '.join(file_list)\n if str(verbose_level) == '2':\n print('sox -V%s %s %s' % (verbose_level, files_str, output_path +\n os.sep + temp_file_name))\n os.system('sox -V%s %s %s' % (verbose_level, files_str, output_path +\n os.sep + temp_file_name))\n return temp_file_name", "def 
_rechunking(self, compressor, parallel=False, replace=False):\n target_path = tempfile.TemporaryDirectory()\n source_sf = strax.DataDirectory(self.path)\n st= self.st\n st.set_context_config(dict(allow_rechunk=False,\n n_chunks=10))\n st.storage = [source_sf]\n run_id = '0'\n st.make(run_id, self.target)\n assert st.is_stored(run_id, self.target)\n assert strax.utils.dir_size_mb(self.path) > 0\n original_n_files = len(glob.glob(os.path.join(self.path, '*', '*')))\n assert original_n_files > 3 # At least two files + metadata\n _, backend_key = source_sf.find(st.key_for(run_id, self.target))\n strax.rechunker(source_directory=backend_key,\n dest_directory=target_path.name if not replace else None,\n replace=True,\n compressor=compressor,\n target_size_mb=strax.default_chunk_size_mb * 2,\n parallel=parallel,\n max_workers=4,\n _timeout=5,\n )\n assert st.is_stored(run_id, self.target)\n # Should be empty, we just replaced the source\n assert strax.utils.dir_size_mb(target_path.name) == 0\n new_n_files = len(glob.glob(os.path.join(self.path, '*', '*',)))\n assert original_n_files > new_n_files\n st.set_context_config(dict(forbid_creation_of='*'))\n st.get_array(run_id, self.target)\n target_path.cleanup()", "def write_chunk(chunk, token):\n dest = rem_dir('grab')\n # input(dest)\n file_name = '{}_{}'.format('cpf_temp', token)\n dest_file_name = os.path.join(os.path.abspath(dest), file_name)\n # input(dest_file_name)\n WRITE_STREAM = open(dest_file_name, 'wb')\n WRITE_STREAM.write(chunk)\n WRITE_STREAM.close()\n\n return True", "def _concatenate_group(group, first_elem):\n target_file_name = re.sub(pattern_lane_num, r\"\\1\", os.path.basename(first_elem))\n target_path = os.path.join(tempfile.gettempdir(), target_file_name)\n\n # Overwriting all files by default\n with open(target_path, \"wb\") as outf:\n for fname in group:\n with open(fname, \"rb\") as inf:\n # TODO: check for newline at the end of file first?\n shutil.copyfileobj(inf, outf)\n return target_path", "def write_to_master_file(\n self, all_files=[], filename=sys.argv[2], separator=sys.argv[3]\n ) -> None:\n if filename == \"\":\n raise EnvironmentError(\"No filename provided!\")\n\n first_file = all_files[0]\n\n with open(filename, \"w+\") as master:\n with open(first_file, \"r+\") as initial_write:\n for line in initial_write:\n master.write(line)\n\n if len(all_files) > 1:\n for i in range(1, len(all_files)):\n master.write(separator)\n with open(all_files[i], \"r+\") as file_to_append:\n for line in file_to_append:\n master.write(line)", "def add_merge_job(dax, final_name, chunk, level, job_number, final):\n j = Job(name=\"merge.sh\")\n out_file_name = final_name + \"-%d-%d.tar.gz\" %(level, job_number)\n out_file = File(out_file_name)\n if final:\n out_file_name = final_name\n out_file = File(final_name)\n j.uses(out_file, link=Link.OUTPUT, transfer=final)\n j.addArguments(out_file)\n for f in chunk:\n flfn = File(f)\n j.uses(flfn, link=Link.INPUT)\n j.addArguments(flfn)\n j.addProfile(Profile(Namespace.CONDOR, 'request_disk', '100 GB'))\n dax.addJob(j)\n return out_file_name", "def writeto(sourcePaths, targetPath, eosDownload=False):\n\n LOG.info('merge %s -> %s', sourcePaths, targetPath)\n\n target = ROOT.TFile.Open(targetPath, 'recreate')\n # This is critical (and safe) - see https://root-forum.cern.ch/t/tfile-close-slow/24179\n ROOT.gROOT.GetListOfFiles().Remove(target)\n\n _nadd = 0\n\n for path in sourcePaths:\n pathOrig = path\n pathReal = os.path.realpath(pathOrig)\n if eosDownload and pathReal.startswith('/eos'):\n 
for _ in range(5):\n with tempfile.NamedTemporaryFile(suffix='.root', delete=False) as tmp:\n pass\n proc = subprocess.Popen(['xrdcp', '-f', 'root://eoscms.cern.ch/' + pathReal, tmp.name])\n proc.communicate()\n if proc.returncode == 0:\n path = tmp.name\n break\n else:\n try:\n os.unlink(tmp.name)\n except:\n pass\n time.sleep(5)\n else:\n raise RuntimeError('Failed to download ' + pathOrig)\n\n start = time.time()\n source = ROOT.TFile.Open(path)\n ROOT.gROOT.GetListOfFiles().Remove(source)\n\n nnew, nadd = merge(source, target)\n\n source.Close()\n target.Close() # closing target at each iteration to flush out in-memory objects\n\n LOG.info('%s -> %s: %d new, %d merged (%.1f s)', pathOrig, targetPath, nnew, nadd, time.time() - start)\n\n _nadd += nadd\n if pathOrig != sourcePaths[-1]:\n if _nadd > 1000000:\n # purge duplicate keys by compressing\n os.rename(targetPath, targetPath + '.tmp')\n writeto([targetPath + '.tmp'], targetPath)\n os.unlink(targetPath + '.tmp')\n _nadd = 0\n \n target = ROOT.TFile.Open(targetPath, 'update')\n ROOT.gROOT.GetListOfFiles().Remove(target)\n\n if eosDownload and pathReal.startswith('/eos'):\n try:\n os.unlink(path)\n except:\n pass", "def assemble_file(names):\n md5 = hashlib.md5()\n filename = ''.join([name.split('-')[-1] for name in names])\n fpath = os.path.join(FILES_DIR, filename)\n with open(fpath, \"wb\") as dst:\n for name in names:\n for chunk in chunked_reader(os.path.join(DATA_DIR, name)):\n md5.update(chunk)\n dst.write(chunk)\n\n return fpath, md5.digest().hex()", "def combine_tokens(tokens):\n partial_ipcdumps = []\n for start_index in range(0, len(tokens), TOKENS_PER_IPCDUMP):\n end_index = min(start_index + TOKENS_PER_IPCDUMP, len(tokens))\n current_tokens = tokens[start_index:end_index]\n partial_ipcdumps.append(\n create_partial_ipc_dump(current_tokens, file_path))\n\n combined_file_path = None\n if len(partial_ipcdumps) > 1:\n combined_file_path = combine_ipc_dumps(partial_ipcdumps, file_path)\n elif len(partial_ipcdumps) == 1:\n combined_file_path = partial_ipcdumps[0]\n\n if not combined_file_path:\n # This can happen in the case of a timeout or other error. The actual\n # error should already be logged, so no need to do it again here.\n return b''\n\n # TODO(mbarbella): Allow token combining functions to write files directly.\n handle = open(combined_file_path, 'rb')\n result = handle.read()\n handle.close()\n\n shell.remove_file(combined_file_path)\n return result", "def run(self):\n first_index, last_index = \\\n self.get_initial_blocks()\n while last_index - first_index > self.block_size:\n first_index, last_index = \\\n self.join_blocks(first_index, last_index)\n self.merge_blocks(self.output_file_name, first_index, last_index)", "def mergeFile():\n with open(\"output.txt\",'w') as o:\n o.write(data1)\n o.write(data2)\n o.write(data3)", "def merge_songs(in_dir, output_path):\n\twith open(output_path, 'a') as out_file:\n\t\tfor file in os.listdir(in_dir):\n\t\t\twith open(in_dir + file, 'r') as lyrics_file:\n\t\t\t\tout_file.write(lyrics_file.read())" ]
[ "0.65807515", "0.6522695", "0.5830749", "0.56331223", "0.5603233", "0.55737466", "0.5561251", "0.5557075", "0.5543502", "0.55282074", "0.5523479", "0.55110836", "0.547458", "0.54524946", "0.5427168", "0.54180014", "0.5275911", "0.5269838", "0.52052176", "0.5198629", "0.51861894", "0.5165819", "0.51608825", "0.5132295", "0.5116679", "0.51163363", "0.51034325", "0.5090486", "0.50695324", "0.5039642" ]
0.6982434
0
two BaseWrapper instances are equal enough
def assert_wrappers_equal(first, second):
    assert first.sk_params == second.sk_params
    assert first.history_ == second.history_
    if not first.model_ or not second.model_:
        assert first.model_ == second.model_
    else:
        assert_models_equal(first.model, second.model)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inheritedClassesEquality(self):\n self.assertTrue(Record(1, 2) == DerivedRecord(1, 2))\n self.assertFalse(Record(1, 2) == DerivedRecord(1, 3))\n self.assertFalse(Record(1, 2) == DerivedRecord(2, 2))\n self.assertFalse(Record(1, 2) == DerivedRecord(3, 4))", "def test_identical(self):\n write this test!", "def testEquality(self):\n pass", "def _is_equal_same_type(self, other):\n return True", "def test_inheritedClassesInequality(self):\n self.assertFalse(Record(1, 2) != DerivedRecord(1, 2))\n self.assertTrue(Record(1, 2) != DerivedRecord(1, 3))\n self.assertTrue(Record(1, 2) != DerivedRecord(2, 2))\n self.assertTrue(Record(1, 2) != DerivedRecord(3, 4))", "def test_baseid_different(self):\n test1 = BaseModel()\n test2 = BaseModel()\n self.assertNotEqual(test1.id, test2, id)", "def same_as(self, other):\n return super().__eq__(other)", "def test_almost_equal(self):\n x = Point(\n lat=23.4,\n lng=23.1,\n author=self.u\n )\n self.assertTrue(self.a == x)\n self.assertFalse(self.a != x)", "def test00(self):\n b_0 = Base()\n b_1 = Base()\n self.assertEqual(b_0.id, 1)\n self.assertEqual(b_1.id, 2)", "def is_type_equivalent(self, other):\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n # removes base attributes in the phyiscal layer.\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n\n remove_base(mine)\n remove_base(theirs)\n\n return type(self) == type(other) and mine == theirs", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def assert_compatible(self, other):\n assert self.config == other.config, ('configs are not the same self: %s '\n 'other %s') % (self.config,\n other.config)\n\n assert self.hash_functions == other.hash_functions, (\n 'hash functions are not the same')\n return True", "def test_differentClassesEquality(self):\n self.assertFalse(Record(1, 2) == DifferentRecord(1, 2))", "def test_class_eq_method(self, test_instances):\n a, b, _ = test_instances\n\n assert a == b", "def test_differentClassesInequality(self):\n self.assertTrue(Record(1, 2) != DifferentRecord(1, 2))", "def test_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def iexact(self, other):", "def __eq__(self, other):\n return isinstance(other, type(self)) and self.size == other.size", "def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or other.is_raw():\n return False\n return self.structure.is_identical(other.structure)", "def __eq__(self, other: 'Pool') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def almost_equals(self, other, decimal=...): # -> bool:\n ...", "def test_attrs(self):\n for self_attr, wrapper_attr in [(\"reactor\", \"_reactor\"),\n (\"client\", \"_client\")]:\n self.assertIdentical(getattr(self, self_attr),\n getattr(self.wrapper, wrapper_attr))", "def test_compatible(self, other):\n if not self.center.dims == other.center.dims:\n raise ValueError(\"Devices have different dimensionality: {:d} vs {:d}\".format(self.center.dims, other.center.dims))\n\n if not self.center.shape == other.center.shape:\n raise ValueError(\"The shape of the central part does not 
match: {} vs {}\".format(self.center.shape, other.center.shape))\n\n if not len(self.leads) == len(other.leads):\n raise ValueError(\"The number of leads is different: {:d} vs {:d}\".format(len(self.leads), len(other.leads)))\n\n for n, (i,j) in enumerate(zip(self.leads, other.leads)):\n if not i.shape == j.shape:\n raise ValueError(\"The shape of a lead {:d} does not match: {} vs {}\".format(n,i.shape,j.shape))\n\n for n, (i,j) in enumerate(zip(self.connections, other.connections)):\n if not numpy.array_equal(i,j):\n raise ValueError(\"The connections arrays for lead {:d} are not equal\".format(n))", "def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)", "def _merge_sanity_check(self, other):\n if self._fields is not None and (\n set(self.query.values_select) != set(other.query.values_select)\n or set(self.query.extra_select) != set(other.query.extra_select)\n or set(self.query.annotation_select) != set(other.query.annotation_select)\n ):\n raise TypeError(\n \"Merging '%s' classes must involve the same values in each case.\"\n % self.__class__.__name__\n )", "def test_autocreate(self):\n a = Vector(1, 2)\n b = Vector(a)\n assert b == a", "def __le__(self, other: object) -> bool:\n ...", "def __le__(self, other: object) -> bool:\n ...", "def test_00(self):\n base0 = Base()\n base1 = Base()\n self.assertEqual(base0.id, 1)\n self.assertEqual(base1.id, 2)", "def __eq__(self, other: 'GatewayCollection') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__" ]
[ "0.6653726", "0.6650997", "0.65729487", "0.64894223", "0.6444425", "0.64114064", "0.63375264", "0.63026255", "0.6292803", "0.6269224", "0.6257633", "0.6256101", "0.62419796", "0.62266093", "0.6213192", "0.6190846", "0.6179785", "0.6126253", "0.61250675", "0.6109466", "0.609552", "0.609192", "0.6090806", "0.6088815", "0.6072727", "0.6072093", "0.6065218", "0.6065218", "0.60543853", "0.60422385" ]
0.67075044
0
The names of the arguments to the function which are contained in the PyArgKeywords list
def arg_names(self):
    return self._arg_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def names(self):\n result = []\n result.extend(self.positional_arguments)\n if self.arbitary_positional_arguments is not None:\n result.append(self.arbitary_positional_arguments)\n if self.arbitary_keyword_arguments is not None:\n result.append(self.arbitary_keyword_arguments)\n result.extend(self.keyword_arguments)\n return result", "def argument_list(self):\n answer = self._call('argument_list')\n return answer.names", "def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args", "def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()", "def argnames(method):\n return [arg for arg in method.__code__.co_varnames if arg != \"self\"]", "def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs", "def argnames(self):\n if self.get_key is None:\n return set()\n return set(self.get_key.names)", "def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return details.args[-nopt:]", "def getPositionalArgs():", "def parameter_names(self) -> List[str]:", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def _get_args(function, varargs=False):\n\n try:\n params = signature(function).parameters\n except ValueError:\n # Error on builtin C function\n return []\n args = [\n key\n for key, param in params.items()\n if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)\n ]\n if varargs:\n varargs = [\n param.name\n for param in params.values()\n if param.kind == param.VAR_POSITIONAL\n ]\n if len(varargs) == 0:\n varargs = None\n return args, varargs\n else:\n return args", "def args(self):\n\t\tret = []\n\t\tfor argname in self._arg_names:\n\t\t\tret += [self._args[argname]]\n\t\treturn ret", "def args(self) -> List[str]:\n return self.__args", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def _validate_arglist_and_kwlist(self, p, items, keywords):\n kwnames = set()\n args = []\n kws = []\n self._validate_arglist_list(items, p.lexer.lexer)\n for arg in items:\n if isinstance(arg, ast.keyword):\n kws.append(arg)\n kwnames.add(arg.arg)\n else:\n args.append(arg)\n for kw in keywords:\n if not isinstance(kw, ast.keyword):\n msg = 'only named arguments may follow *expression'\n tok = FakeToken(p.lexer.lexer, p.lineno(2))\n syntax_error(msg, tok)\n if kw.arg in kwnames:\n msg = 'keyword argument repeated'\n tok = FakeToken(p.lexer.lexer, kw.lineno)\n syntax_error(msg, tok)\n kwnames.add(kw.arg)\n kws.extend(keywords)\n\n return args, kws", "def get_argument_as_keywords(self):\n status = True\n arg_kv = self.get_values_for_mandatory_args()\n if len(arg_kv) != len(self.req_args_list):\n msg = 'could not execute %s without mandatory arguments' % (object)\n self.data_repository = 
skip_and_report_status(self.data_repository, msg)\n status = False\n arg_kv = self.get_values_for_optional_args(arg_kv)\n return arg_kv, status", "def get_json_argument_list():\n list_of_arguments_to_get = [\"finish_time\", \"segmentation_training_samples\", \"patch_count_per_image\", \"learning_rate\", \"batch_k\",\n \"batch_p\", \"flip_augment\", \"standardize\", \"margin\", \"metric\"]\n\n return list_of_arguments_to_get", "def derive_args(func):\n args = inspect.getfullargspec(func).args\n if args and is_selfish_name(args[0]):\n del args[0]\n return args", "def getargspec(self,obj):\n\n if inspect.isfunction(obj):\n func_obj = obj\n elif inspect.ismethod(obj):\n func_obj = obj.im_func\n else:\n raise TypeError, 'arg is not a Python function'\n args, varargs, varkw = inspect.getargs(func_obj.func_code)\n return args, varargs, varkw, func_obj.func_defaults", "def getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def get_arg_name(args):\n names = []\n for arg in args:\n if type(arg).__name__ == 'ID':\n names.append(arg.name)\n elif type(arg).__name__ == 'UnaryOp':\n names.append(arg.expr.name)\n elif type(arg).__name__ == 'StructRef':\n #############################################\n # So far, we don't care about this situation:\n # fun(a->b)\n # POSSIBLE CODE HERE\n #############################################\n names.append(None)\n return names", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def format_args(self, **kwargs: Any) -> str:\n decl = self.declaration\n\n # The logic allows this to be used for both function like and non\n # function like macros.\n # 'SOME_DEFINE'.partition('(')\n # >>> 'SOME_DEFINE', '', ''\n #\n # 'FUNCTION_LIKE(_a, _b)'.partition('(')\n # >>> 'FUNCTION_LIKE', '(', '_a, _b)'\n _, part, args = decl.partition(\"(\")\n return part + args", "def getArgs(func):\n # exclude the defaults at the end (hence the [:-1])\n args = list(utils.flatten(inspect.getargspec(func)[:-1]))\n return set(args).difference(set([None]))", "def GetFunctionParametersAndValues():\n frame = inspect.currentframe().f_back\n args, _, _, values = inspect.getargvalues(frame)\n return ([(i, values[i]) for i in args])", "def build_arg_list(fn, env):\r\n kw = {}\r\n argspec = inspect.getargspec(fn)\r\n\r\n # if there is a **kw argument in the fn definition,\r\n # just pass along the environment\r\n if argspec[2]:\r\n kw = env\r\n #else for each entry in the arglist set the value from the environment\r\n else:\r\n #skip self\r\n argnames = argspec[0][1:]\r\n for name in argnames:\r\n if name in env:\r\n kw[name] = env[name]\r\n return kw", "def get_kwd_args(func):\n try:\n sig = inspect.signature(func)\n except AttributeError:\n args, _, _, defaults = inspect.getargspec(func)\n if defaults:\n kwonlyargs = args[-len(defaults):]\n else:\n kwonlyargs = []\n else:\n kwonlyargs = {p.name:p.default for p in sig.parameters.values()\n if p.default is not p.empty}\n\n return kwonlyargs" ]
[ "0.7080962", "0.69528246", "0.6950511", "0.6814684", "0.68055576", "0.67487407", "0.6712614", "0.6712274", "0.6702668", "0.6666874", "0.6628423", "0.66158634", "0.6563383", "0.64373004", "0.64143133", "0.6394061", "0.6304303", "0.6285481", "0.6278419", "0.6258944", "0.62564945", "0.62373346", "0.62220895", "0.62058085", "0.61846083", "0.61682487", "0.61665857", "0.61631507", "0.613867", "0.6131295" ]
0.6993237
1
Create FunctionDef responsible for casting python argument to C
def Python_to_C(c_object):
    try :
        cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]
    except KeyError:
        errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype, severity='fatal')

    cast_func = FunctionDef(name = cast_function,
                            body = [],
                            arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],
                            results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])

    return cast_func
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def C_to_Python(c_object):\n try :\n cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)],\n results = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)])\n\n return cast_func", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n 
continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = 
{cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def cpp_function(self):", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C 
wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", 
\"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. 
{ \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index", "def compile_function(self, function, arguments):", "def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type not implemented for argument collection : \"+str(type(parse_arg))) from e", "def test_callback_from_c(self):\n source = io.StringIO(\"\"\"\n int add(int x, int y);\n int x(int a) {\n return add(a + 1, 13);\n }\n \"\"\")\n arch = get_current_arch()\n obj = cc(source, arch, debug=True)\n def my_add(x: int, y: int) -> int:\n return x + y + 2\n imports = {\n 'add': my_add\n }\n m = load_obj(obj, imports=imports)\n y = m.x(101)\n self.assertEqual(117, y)", "def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = {};\\n'.format(code)\n catch += ' static 
std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def itkSingleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def dispatchMacroEnvFunction(self, tree, tree_parent):\n cpp_func_name = \"getMacroProperty\"\n py_func = tree.attr\n # extract type from function name\n py_type = py_func[len(cpp_func_name):]\n if py_type not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_type}' is not a valid FLAME GPU type\")\n # get cpp type\n t = self._fgpu_types[py_type]\n cpp_func_name += f\"<{t}\"\n # mess with the parent to extract (and remove arguments so they dont end up in the argument list)\n if not tree_parent.args :\n self.RaiseError(tree, f\" Macro environment function '{py_func}' is expected to have some arguments.\")\n # if more than one arg then the rest are bounds to translate\n if len(tree_parent.args) > 1:\n bounds = tree_parent.args[1:]\n # process bounds by appending to cpp function template arguments\n for i in bounds:\n if isinstance(i, ast.Num): # num required for python 3.7\n if not isinstance(i.n, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.n}\"\n else: # all Python > 3.7 \n if not isinstance(i, ast.Constant):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).\")\n if not isinstance(i.value, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.value}\"\n # remove bounds from argument list (in place)\n del tree_parent.args[1:]\n cpp_func_name += \">\"\n 
self.write(cpp_func_name)", "def itkCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def _make_array(self, c):\n return (c * ctypes.py_object)()", "def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return &___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)", "def create_checked_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n error_type = ffi.typeof(\"int*\")\n\n def func(list_, text):\n # typecheck/convert text\n if isinstance(text, unicode):\n text = text.encode(\"utf-8\")\n elif text is None:\n text = ffi.NULL\n elif not isinstance(text, str):\n raise TypeError\n\n len_ = len(list_)\n error = ffi.new(error_type)\n result = overhead(list_, len_, text, error)\n\n if not result:\n raise Exception(\"Error occured: %d\" % error[0])\n\n return result\n\n return func", "def adaptPythonToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToCpp(self, *args)", "def create_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n def func(list_, length, text, error):\n return overhead(list_, length, text, error)\n\n return overhead", "def build(self, cres):\n _launch_threads()\n # Build wrapper for ufunc entry point\n ctx = cres.target_context\n library = cres.library\n signature = cres.signature\n llvm_func = library.get_function(cres.fndesc.llvm_func_name)\n wrapper, env = build_gufunc_wrapper(library, ctx, llvm_func,\n signature, self.sin, self.sout,\n fndesc=cres.fndesc,\n env=cres.environment)\n\n ptr = library.get_pointer_to_function(wrapper.name)\n\n # Get dtypes\n dtypenums = []\n for a in signature.args:\n if isinstance(a, types.Array):\n ty = a.dtype\n else:\n ty = a\n dtypenums.append(as_dtype(ty).num)\n\n return dtypenums, ptr, env", "def _PythonToCtype(data, c_type):\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data", "def get_C_code(self, C_function_name):\n from cascada.bitvector.printing import BvCCodePrinter\n\n width2type = BvCCodePrinter._width2C_type\n\n # in C, * binds to the declarator, not the type specifier\n input_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.input_vars])\n output_vars_c = ', '.join([\"{} *{}\".format(width2type(v.width), v.name) for v in self.output_vars])\n if self.external_vars:\n external_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.external_vars])\n external_vars_c = external_vars_c + \", \"\n else:\n external_vars_c = \"\"\n\n aux = f\"void 
{C_function_name}({input_vars_c}, {external_vars_c}{output_vars_c})\"\n header = f\"{aux};\"\n body = f\"#include <stdint.h>\\n{aux}{{\" # stdint for uint_*\n\n outvar2outvar_c = {v: core.Variable(\"*\" + v.name, v.width, allowed_symbols=\"*\") for v in self.output_vars}\n\n def primary_assignment2C_code(my_var, my_expr):\n assert isinstance(my_expr, (core.Constant, core.Variable, operation.PrimaryOperation))\n if my_var in self.output_vars:\n return f\"*{my_var} = {my_expr.crepr()};\"\n else:\n return f\"{width2type(my_var.width)} {my_var} = {my_expr.crepr()};\"\n\n for var, expr in self.assignments.items():\n expr = expr.xreplace(outvar2outvar_c)\n if isinstance(expr, operation.SecondaryOperation):\n expr = expr.doit(eval_sec_ops=True)\n body += f\"\\n\\t{primary_assignment2C_code(var, expr)}\"\n body += \"\\n};\"\n\n return header, body", "def make_func_code(params):\n class FuncCode(object):\n __slots__ = ('co_varnames', 'co_argcount')\n fc = FuncCode()\n fc.co_varnames = params\n fc.co_argcount = len(params)\n return fc", "def cast(*args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_cast(*args)", "def call_ccall(x):\n ret = c_call(x)\n return ret, cython.typeof(ret)", "def call_cdef_inline(x):\n ret = cdef_inline(x)\n return ret, cython.typeof(ret)", "def _build_comute_argtype(num_nd, num_nd_write):\n ret = [_xc_func_p, ctypes.c_size_t]\n ret += [_ndptr] * num_nd\n ret += [_ndptr_w] * num_nd_write\n return tuple(ret)" ]
[ "0.70022583", "0.67676306", "0.6716482", "0.65700346", "0.6408847", "0.6320961", "0.6177105", "0.61178625", "0.609686", "0.60915", "0.6037971", "0.60285324", "0.5978813", "0.5958597", "0.59014344", "0.58797467", "0.58504516", "0.58494455", "0.5848168", "0.5846862", "0.5815121", "0.5813147", "0.5790319", "0.57641983", "0.57453537", "0.5734473", "0.57235515", "0.5712704", "0.5711848", "0.56919324" ]
0.7423723
0
Create FunctionDef responsible for casting c argument to python
def C_to_Python(c_object): try : cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)] except KeyError: errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal') cast_func = FunctionDef(name = cast_function, body = [], arguments = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)], results = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)]) return cast_func
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Python_to_C(c_object):\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def cpp_function(self):", "def 
convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n 
cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type not 
implemented for argument collection : \"+str(type(parse_arg))) from e", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. 
Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if 
meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. { \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index", "def test_callback_from_c(self):\n source = io.StringIO(\"\"\"\n int add(int x, int y);\n int x(int a) {\n return add(a + 1, 13);\n }\n \"\"\")\n arch = get_current_arch()\n obj = cc(source, arch, debug=True)\n def my_add(x: int, y: int) -> int:\n return x + y + 2\n imports = {\n 'add': my_add\n }\n m = load_obj(obj, imports=imports)\n y = m.x(101)\n self.assertEqual(117, y)", "def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return 
&___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)", "def adaptPythonToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToCpp(self, *args)", "def compile_function(self, function, arguments):", "def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = {};\\n'.format(code)\n catch += ' static std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result", "def create_checked_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n error_type = ffi.typeof(\"int*\")\n\n def func(list_, text):\n # typecheck/convert text\n if isinstance(text, unicode):\n text = text.encode(\"utf-8\")\n elif text is None:\n text = ffi.NULL\n elif not isinstance(text, str):\n raise TypeError\n\n len_ = len(list_)\n error = ffi.new(error_type)\n result = overhead(list_, len_, text, error)\n\n if not result:\n raise Exception(\"Error occured: %d\" % error[0])\n\n return result\n\n return func", "def dispatchMacroEnvFunction(self, tree, tree_parent):\n cpp_func_name = \"getMacroProperty\"\n py_func = tree.attr\n # extract type from function name\n py_type = py_func[len(cpp_func_name):]\n if py_type not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_type}' is not a valid FLAME GPU type\")\n # get cpp type\n t = self._fgpu_types[py_type]\n cpp_func_name += f\"<{t}\"\n # mess with the parent to extract (and remove arguments so they dont end up in the argument list)\n if not tree_parent.args :\n self.RaiseError(tree, f\" Macro environment function '{py_func}' is expected to have some arguments.\")\n # if more than one arg then the rest are bounds to translate\n if len(tree_parent.args) > 1:\n bounds = tree_parent.args[1:]\n # process bounds by appending to cpp function template arguments\n for i in bounds:\n if isinstance(i, ast.Num): # num required for python 3.7\n if not isinstance(i.n, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n 
cpp_func_name += f\", {i.n}\"\n else: # all Python > 3.7 \n if not isinstance(i, ast.Constant):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).\")\n if not isinstance(i.value, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.value}\"\n # remove bounds from argument list (in place)\n del tree_parent.args[1:]\n cpp_func_name += \">\"\n self.write(cpp_func_name)", "def create_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n def func(list_, length, text, error):\n return overhead(list_, length, text, error)\n\n return overhead", "def cpp_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cts = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n ct = self.cpp_type(x)\n elif argkind is Arg.LIT:\n ct = self.cpp_literal(x)\n elif isinstance(x, Number):\n ct = self.cpp_literal(x)\n else:\n try:\n ct = self.cpp_type(x) # guess it is a type\n except TypeError:\n ct = x # guess it is a variable\n cts.append(ct)\n fname += '' if 0 == len(cts) else \"< \" + \", \".join(cts) + \" >\"\n return fname", "def cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def _make_array(self, c):\n return (c * ctypes.py_object)()", "def cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def adaptCorbaToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptCorbaToCpp(self, *args)", "def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cfs = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n cf = self.cython_functionname(x)[1]\n elif argkind is Arg.LIT:\n cf = self.cython_literal(x)\n elif argkind is Arg.VAR:\n cf = x\n elif isinstance(x, Number):\n cf = self.cython_literal(x)\n else:\n try:\n cf = self.cython_functionname(x)[1] # guess type\n except TypeError:\n cf = x # guess variable\n cfs.append(cf)\n fname += '' if 0 == len(cfs) else \"_\" + \"_\".join(cfs)\n return fname", "def cpp_type_to_python(self, ot: str):\n t = ot\n t = remove_cvref(t)\n t = self._remove_variable_type_prefix(t)\n try:\n return cpp_base_type_to_python(t)\n except KeyError:\n pass\n if is_function_pointer_type(t):\n func = function_pointer_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_function_type(t):\n func = function_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_pointer_type(t):\n cpp_base = self.resolve_to_basic_type_remove_const(pointer_base(t))\n if is_pointer_type(cpp_base) or is_array_type(cpp_base):\n return f'\"level 2 pointer:{t}\"' # un-convertible: level 2 pointer\n if cpp_base in ARRAY_BASES:\n return ARRAY_BASES[cpp_base]\n return self.cpp_type_to_python(cpp_base)\n if is_array_type(t):\n b = array_base(t)\n if b in ARRAY_BASES: # special case: string array\n return 
ARRAY_BASES[b]\n base = self.cpp_type_to_python(b)\n return f'List[{base}]'\n if is_tuple_type(t):\n es = tuple_elements(t)\n bases = [self.cpp_type_to_python(i) for i in es]\n bases_str = \",\".join(bases)\n return f'Tuple[{bases_str}]'\n\n # check classes\n objects = self.objects\n if t in objects:\n o = objects[t]\n if isinstance(o, GeneratorClass) or isinstance(o, GeneratorEnum):\n return t.replace(\"::\", \".\").strip(\" .\") # todo fix this\n if isinstance(o, GeneratorTypedef):\n return self.cpp_type_to_python(o.target)\n\n if t.startswith(\"(anonymous\"):\n return f'\"{t}\"'\n\n # this means this is\n logger.warning(\"%s might be an internal symbol, failed to resolve to basic type\", t)\n return t", "def _build_comute_argtype(num_nd, num_nd_write):\n ret = [_xc_func_p, ctypes.c_size_t]\n ret += [_ndptr] * num_nd\n ret += [_ndptr_w] * num_nd_write\n return tuple(ret)", "def build(self, cres):\n _launch_threads()\n # Build wrapper for ufunc entry point\n ctx = cres.target_context\n library = cres.library\n signature = cres.signature\n llvm_func = library.get_function(cres.fndesc.llvm_func_name)\n wrapper, env = build_gufunc_wrapper(library, ctx, llvm_func,\n signature, self.sin, self.sout,\n fndesc=cres.fndesc,\n env=cres.environment)\n\n ptr = library.get_pointer_to_function(wrapper.name)\n\n # Get dtypes\n dtypenums = []\n for a in signature.args:\n if isinstance(a, types.Array):\n ty = a.dtype\n else:\n ty = a\n dtypenums.append(as_dtype(ty).num)\n\n return dtypenums, ptr, env", "def itkSingleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def itkCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def make_func_code(params):\n class FuncCode(object):\n __slots__ = ('co_varnames', 'co_argcount')\n fc = FuncCode()\n fc.co_varnames = params\n fc.co_argcount = len(params)\n return fc", "def _PythonToCtype(data, c_type):\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data" ]
[ "0.72192234", "0.68052465", "0.6632314", "0.64130855", "0.6358928", "0.63258225", "0.6155145", "0.6139662", "0.60484755", "0.60477465", "0.60254824", "0.6015706", "0.59863913", "0.59403557", "0.58158", "0.57880235", "0.57846373", "0.5784282", "0.5761604", "0.57465273", "0.5739184", "0.5732361", "0.5699242", "0.5689414", "0.56871146", "0.56743395", "0.5621684", "0.5606441", "0.557284", "0.5560054" ]
0.69464856
1
Generate function Call of c/python api PyErr_SetString
def PyErr_SetString(exception, message): func = FunctionDef(name = 'PyErr_SetString', body = [], arguments = [Variable(dtype = PyccelPyObject(), name = 'o'), Variable(dtype = NativeString(), name = 's')], results = []) exception = Variable(PyccelPyObject(), name = exception) return FunctionCall(func, [exception, message])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def failure_code(sub):\r\n return '''{\r\n %(failure_var)s = %(id)s;\r\n if (!PyErr_Occurred()) {\r\n PyErr_SetString(PyExc_RuntimeError,\r\n \"Unexpected error in an Op's C code. \"\r\n \"No Python exception was set.\");\r\n }\r\n goto __label_%(id)i;}''' % sub", "def SPYExceptionHandler(*excargs, **exckwargs):\n\n # Depending on the number of input arguments, we're either in Jupyter/iPython\n # or \"regular\" Python - this matters for coloring error messages\n if len(excargs) == 3:\n isipy = False\n etype, evalue, etb = excargs\n else:\n etype, evalue, etb = sys.exc_info()\n try: # careful: if iPython is used to launch a script, ``get_ipython`` is not defined\n ipy = get_ipython()\n isipy = True\n cols = ipy.InteractiveTB.Colors\n cols.filename = cols.filenameEm\n cols.bold = ansiBold\n sys.last_traceback = etb # smartify ``sys``\n except NameError:\n isipy = False\n\n # Pass ``KeyboardInterrupt`` on to regular excepthook so that CTRL + C\n # can still be used to abort program execution (only relevant in \"regular\"\n # Python prompts)\n if issubclass(etype, KeyboardInterrupt) and not isipy:\n sys.__excepthook__(etype, evalue, etb)\n return\n\n # Starty by putting together first line of error message\n emsg = \"{}\\nSyNCoPy encountered an error in{} \\n\\n\".format(cols.topline if isipy else \"\",\n cols.Normal if isipy else \"\")\n\n # If we're dealing with a `SyntaxError`, show it and getta outta here\n if issubclass(etype, SyntaxError):\n\n # Just format exception, don't mess around w/ traceback\n exc_fmt = traceback.format_exception_only(etype, evalue)\n for eline in exc_fmt:\n if \"File\" in eline:\n eline = eline.split(\"File \")[1]\n fname, lineno = eline.split(\", line \")\n emsg += \"{}{}{}\".format(cols.filename if isipy else \"\",\n fname,\n cols.Normal if isipy else \"\")\n emsg += \", line {}{}{}\".format(cols.lineno if isipy else \"\",\n lineno,\n cols.Normal if isipy else \"\")\n elif \"SyntaxError\" in eline:\n smsg = eline.split(\"SyntaxError: \")[1]\n emsg += \"{}{}SyntaxError{}: {}{}{}\".format(cols.excName if isipy else \"\",\n cols.bold if isipy else \"\",\n cols.Normal if isipy else \"\",\n cols.bold if isipy else \"\",\n smsg,\n cols.Normal if isipy else \"\")\n else:\n emsg += \"{}{}{}\".format(cols.line if isipy else \"\",\n eline,\n cols.Normal if isipy else \"\")\n\n # Show generated message and leave (or kick-off debugging in Jupyer/iPython if %pdb is on)\n logger = get_parallel_logger()\n logger.critical(emsg)\n if isipy:\n if ipy.call_pdb:\n ipy.InteractiveTB.debugger()\n return\n\n # Build an ordered(!) 
dictionary that encodes separators for traceback components\n sep = OrderedDict({\"filename\": \", line \",\n \"lineno\": \" in \",\n \"name\": \"\\n\\t\",\n \"line\": \"\\n\"})\n\n # Find \"root\" of traceback tree (and remove outer-most frames)\n keepgoing = True\n while keepgoing:\n frame = traceback.extract_tb(etb)[0]\n etb = etb.tb_next\n if frame.filename.find(\"site-packages\") < 0 or \\\n (frame.filename.find(\"site-packages\") >= 0 and \\\n frame.filename.find(\"syncopy\") >= 0):\n tb_entry = \"\"\n for attr in sep.keys():\n tb_entry += \"{}{}{}{}\".format(getattr(cols, attr) if isipy else \"\",\n getattr(frame, attr),\n cols.Normal if isipy else \"\",\n sep.get(attr))\n emsg += tb_entry\n keepgoing = False\n\n # Format the exception-part of the traceback - the resulting list usually\n # contains only a single string - if we find more just use everything\n exc_fmt = traceback.format_exception_only(etype, evalue)\n if len(exc_fmt) == 1:\n exc_msg = exc_fmt[0]\n idx = exc_msg.rfind(etype.__name__)\n if idx >= 0:\n exc_msg = exc_msg[idx + len(etype.__name__):]\n exc_name = \"{}{}{}{}\".format(cols.excName if isipy else \"\",\n cols.bold if isipy else \"\",\n etype.__name__,\n cols.Normal if isipy else \"\")\n else:\n exc_msg = \"\".join(exc_fmt)\n exc_name = \"\"\n\n # Now go through traceback and put together a list of strings for printing\n if __tbcount__ and etb is not None:\n emsg += \"\\n\" + \"-\"*80 + \"\\nAbbreviated traceback:\\n\\n\"\n tb_count = 0\n tb_list = []\n for frame in traceback.extract_tb(etb):\n if frame.filename.find(\"site-packages\") < 0 or \\\n (frame.filename.find(\"site-packages\") >= 0 and \\\n frame.filename.find(\"syncopy\") >= 0):\n tb_entry = \"\"\n for attr in sep.keys():\n tb_entry += \"{}{}{}{}\".format(\"\", # placeholder for color if wanted\n getattr(frame, attr),\n \"\", # placeholder for color if wanted\n sep.get(attr))\n tb_list.append(tb_entry)\n tb_count += 1\n if tb_count == __tbcount__:\n break\n emsg += \"\".join(tb_list)\n\n # Finally, another info message\n if etb is not None:\n emsg += \"\\nUse `import traceback; import sys; traceback.print_tb(sys.last_traceback)` \" + \\\n \"for full error traceback.\\n\"\n\n # Glue actual Exception name + message to output string\n emsg += \"{}{}{}{}{}\".format(\"\\n\" if isipy else \"\",\n exc_name,\n cols.bold if isipy else \"\",\n exc_msg,\n cols.Normal if isipy else \"\",)\n\n\n # Show generated message and get outta here\n logger = get_parallel_logger()\n logger.critical(emsg)\n\n # Kick-start debugging in case %pdb is enabled in Jupyter/iPython\n if isipy:\n if ipy.call_pdb:\n ipy.InteractiveTB.debugger()", "def format_exc():\n from traceback import format_exc\n return format_exc().decode('utf-8', 'surrogateescape')", "def getCompilerError():", "def ErrorString(self): # real signature unknown; restored from __doc__\n pass", "def transformErr2Str(self,*args):\n error_code = c_int32(args[0])\n error_str = create_string_buffer(\"\\000\"*1024)\n status = self.__acqiris_QuantroDLL1.transformErr2Str(self.__instrumentID,error_code,error_str) \n return str(error_str)", "def traceback(self):", "def stack_trace(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:", "def exc_info(): # real signature unknown; restored from __doc__\n pass", "def test_friendly_exception_formatting_exc_with_str_overload():\n ex = InsufficientSignatures(1, 3)\n\n formatted_exception = friendlyEx(ex)\n\n assert formatted_exception == '{}'.format(ex.reason)", "def exception(self):\n exc_type, 
exc_value, exc_tb = sys.exc_info()\n cui.message(traceback.format_exception_only(exc_type, exc_value)[-1],\n log_message=traceback.format_exc())", "def _handle_error(self, err: ctypes.c_char_p, method: str) -> Exception:\n if err:\n string = ctypes.string_at(err).decode(\"utf-8\")\n self._free_error(err)\n return RuntimeError(string)\n else:\n return RuntimeError(f\"Unknown error in {method}. \")", "def fancy_traceback(exc: Exception) -> str:\n text = \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n return f\"```py\\n{text[-4086:]}\\n```\"", "def _FormatException(exc):\n return ''.join(traceback.format_exception_only(type(exc), exc))", "def test_does_not_crash(self):\n py_function(6)", "def repr_failure(self, excinfo):\n if excinfo.errisinstance(MypyError):\n return excinfo.value.args[0]\n return super().repr_failure(excinfo)", "def src_strerror(error):\n return ffi.string(_lib.src_strerror(error)).decode()", "def __ex(exception_string, internal=False):\n ex = str(exception_string).strip()\n while \" \" * 2 in ex:\n ex = ex.replace((\" \" * 2), \" \")\n if internal:\n ex = \"PaVal: \" + ex\n raise Exception(ex)", "def py_raise(*xs):\n raise NotImplemented", "def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe", "def my_err_handler(traceback, exec_info):\n print \"Custom function invoked\"\n print \"Formatted exception\"\n print traceback.format_exc()\n print \"System exec info\"\n print exec_info\n exp_type, exp_value, exp_traceback = exec_info\n print \"String formatted exception\"\n print traceback.format_exception(exp_type, exp_value, exp_traceback)\n print \"End of custom function\"", "def test_cclerror_repr():\n e = pyccl.CCLError(\"blah\")\n e2 = eval(repr(e))\n assert str(e2) == str(e)\n assert e2 == e", "def text(eparams, context=5):\n import os\n import types\n import time\n import traceback\n import linecache\n import 
inspect\n import pydoc\n\n etype, evalue, etb = eparams\n if isinstance(etype, types.ClassType):\n etype = etype.__name__\n pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable\n date = time.ctime(time.time())\n head = \"%s\\n%s\\n%s\\n\" % (str(etype), pyver, date) + '''\nA problem occurred in a Python script. Here is the sequence of\nfunction calls leading up to the error, in the order they occurred.\n'''\n\n frames = []\n records = inspect.getinnerframes(etb, context)\n for frame, file, lnum, func, lines, index in records:\n file = file and os.path.abspath(file) or '?'\n args, varargs, varkw, locals = inspect.getargvalues(frame)\n call = ''\n if func != '?':\n call = 'in ' + func + \\\n inspect.formatargvalues(args, varargs, varkw, locals,\n formatvalue=lambda value: '=' + pydoc.text.repr(value))\n\n highlight = {}\n\n def reader(lnum=[lnum]):\n highlight[lnum[0]] = 1\n try:\n return linecache.getline(file, lnum[0])\n finally:\n lnum[0] += 1\n vars = scanvars(reader, frame, locals)\n\n rows = [' %s %s' % (file, call)]\n if index is not None:\n i = lnum - index\n for line in lines:\n num = '%5d ' % i\n rows.append(num + line.rstrip())\n i += 1\n\n done, dump = {}, []\n for name, where, value in vars:\n if name in done:\n continue\n done[name] = 1\n if value is not __UNDEF__:\n if where == 'global':\n name = 'global ' + name\n elif where == 'local':\n name = name\n else:\n name = where + name.split('.')[-1]\n dump.append('%s = %s' % (name, pydoc.text.repr(value)))\n else:\n dump.append(name + ' undefined')\n\n rows.append('\\n'.join(dump))\n frames.append('\\n%s\\n' % '\\n'.join(rows))\n\n exception = ['%s: %s' % (str(etype), str(evalue))]\n if isinstance(evalue, types.InstanceType):\n for name in dir(evalue):\n value = pydoc.text.repr(getattr(evalue, name))\n exception.append('\\n%s%s = %s' % (\" \" * 4, name, value))\n\n return head + ''.join(frames) + ''.join(exception) + '''\n\nThe above is a description of an error in a Python program. Here is\nthe original traceback:\n\n%s\n''' % ''.join(traceback.format_exception(etype, evalue, etb))", "def exception_handler(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))", "def vpython_error_message():\n error_message = (\n \"<p>&#9888; Sorry, spacesimmer! OrbitX has crashed for \"\n \"some reason.</p>\"\n\n \"<p>Any information that OrbitX has on the crash has \"\n \"been saved to a logfile. 
If you want to get this problem fixed,\"\n \" send the contents of the log file \"\n \"<blockquote>\" +\n logs.logfile_name.replace('\\\\', '\\\\\\\\') +\n \"</blockquote> \"\n \"to Patrick Melanson along with a description of what was \"\n \"happening in the program when it crashed.</p>\"\n\n \"<p>Again, thank you for using OrbitX!</p>\"\n )\n vpython.canvas.get_selected().append_to_caption(f\"\"\"<script>\n if (document.querySelector('div.error') == null) {{\n error_div = document.createElement('div');\n error_div.className = 'error';\n error_div.innerHTML = \"{error_message}\";\n document.querySelector('body').prepend(error_div);\n }}\n </script>\"\"\")\n vpython.canvas.get_selected().append_to_caption(\"\"\"<style>\n .error {\n color: #D8000C !important;\n background-color: #FFBABA;\n margin: 10px 0;\n padding: 10px;\n border-radius: 5px 5px 5px 5px;\n width: 700px;\n }\n span.code {\n color: #D8000C !important;\n font-family: monospace;\n }\n blockquote {\n font-family: monospace;\n }\n </style>\"\"\")\n\n time.sleep(0.1) # Let vpython send out this update", "def graphical_exception_handler(self, exc_type, exc_value, exc_tb):\n bugdialog.ShowEI(exc_type, exc_value, exc_tb)\n if compat.PYTHON2: sys.exc_clear()", "def raise_error(Err):\n raise Err()", "def exception(self, *args, **kwargs):", "def _get_traceback(self, exc_info=None):\n import traceback\n import sys\n return '\\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))", "def create_exception(self, msg: str):" ]
[ "0.60588336", "0.5961557", "0.58332723", "0.5556526", "0.55433345", "0.54902726", "0.5450629", "0.5449993", "0.5403903", "0.53904366", "0.53624654", "0.533152", "0.53102547", "0.529605", "0.52905834", "0.52848315", "0.52823675", "0.5257808", "0.5237709", "0.52302915", "0.5175386", "0.5172794", "0.51624376", "0.5112163", "0.51074886", "0.5106983", "0.50938654", "0.50820225", "0.505966", "0.5052084" ]
0.7551365
0
Generate TypeError exception from the variable information (datatype, precision)
def generate_datatype_error(variable): dtype = variable.dtype if isinstance(dtype, NativeBool): precision = '' if isinstance(dtype, NativeComplex): precision = '{} bit '.format(variable.precision * 2 * 8) else: precision = '{} bit '.format(variable.precision * 8) message = '"Argument must be {precision}{dtype}"'.format( precision = precision, dtype = variable.dtype) return PyErr_SetString('PyExc_TypeError', message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})", "def test_type_error(self):\n self._error_test(TypeError)", "def test_invalid_expression_type(self, parse_input_mocked_metadata):\n with pytest.raises(TypeError, match=r\"not of declared type int\"):\n parse_input_mocked_metadata(\"int Beta = -0.231e-6+5.21e-2j\")", "def try_wrong_types(self, p, name, type_):\n for x in (1, 1.0, \"x\", True, np.ndarray,):\n if type(x) != type_:\n with self.assertRaises(TypeError, msg=f\"{name} {type_} {x}\"):\n setattr(p, name, x)", "def test_incorrect_arg_type(self):\n\n with pytest.raises(TypeError) as exc_info:\n upper_incomplete_gamma(a='A', z=0.3)\n\n expected_error_msg = (\n 'type of argument \"a\" must be one of (int, float); got str instead'\n )\n assert str(exc_info.value) == expected_error_msg", "def test_non_pd_type_error(self):\n\n x = BaseTransformer(columns=\"a\")\n\n with pytest.raises(ValueError):\n\n x.transform(X=[1, 2, 3, 4, 5, 6])", "def test_datatype_error(self):\n arr = numpy.zeros((10,10), dtype='complex')\n self.assertRaises(ValueError, bytscl, arr)", "def _raise_value_error(is_gt, tracker, seq):\n if is_gt:\n raise TrackEvalException(\n 'GT data for sequence %s cannot be converted to the right format. Is data corrupted?' % seq)\n else:\n raise TrackEvalException(\n 'Tracking data from tracker %s, sequence %s cannot be converted to the right format. '\n 'Is data corrupted?' % (tracker, seq))", "def test_type_errors():\n\n\ttry:\n\t\ttransmissions = compute_transmissions(cal_directory, lines = 3.0)\n\texcept TypeError:\n\t\ttry:\n\t\t\ttransmissions = compute_transmissions(cal_directory, calibrator = 300.0)\n\t\texcept TypeError:\n\t\t\tassert True\n\t\telse:\n\t\t\tassert False\n\telse:\n\t\tassert False", "def type_error(var, types):\n\n divisor = None\n if len(types) == 2:\n divisor = \" or \"\n elif len(types) > 2:\n divisor = \", \"\n\n raise TypeError(\n \"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n types)), var_type=type(var)))", "def test_exception_raised(self):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\", \"b\", \"c\"], scaler=\"standard\")\n\n with pytest.raises(\n TypeError,\n match=r\"\"\"The following columns are not numeric in X; \\['b', 'c'\\]\"\"\",\n ):\n\n x.check_numeric_columns(df)", "def test_creation_float():\n with pytest.raises(ValueError) as __:\n value = 42.30474\n __ = param.Integer(value=value)", "def test_dict_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def test__specification_type_to_python_type_unsupported_type(self):\n with self.assertRaises(TypeError):\n _specification_type_to_python_type(\"unsupported_type\")", "def test_non_pd_type_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=\"a\")\n\n with pytest.raises(ValueError):\n\n x.fit(X=df, y=[1, 2, 3, 4, 5, 6])", "def test_invalid_value(self):\n with self.assertRaises(TypeError):\n METRIC_SYSTEM.length('25a', LENGTH_KILOMETERS)\n with self.assertRaises(TypeError):\n METRIC_SYSTEM.temperature('50K', TEMP_CELSIUS)", "def _TypeMismatch(a, b):\n return 'Types do not match, %s v. 
%s' % (str(a), str(b))", "def test_instantiate_7():\n with raises(ValueError):\n FixedPoint(1.5, 'Q20.204')", "def test_wrong_type_error(self, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"invalid value\"):\n bb = parse_input_mocked_metadata(\n \"for int m in [1, 4.2, 9]\\n\\tMZgate(0, 1) | [0, 1]\"\n )", "def test_constructor_wrong_parameter_type(self):\n\n for invalid in (None, 1):\n with self.assertRaises(TypeError):\n group_tr = OCIO.FixedFunctionTransform(invalid)", "def conversionNotPossibleException(valueType: cern.japc.value.ValueType, valueType2: cern.japc.value.ValueType) -> cern.japc.value.ValueConversionException:\n ...", "def test_invalid_argument_type(self):\n t = TruthTable('A or B')\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(float())\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(None)", "def test_validation_can_fail():\n\n @type_checked\n def _run_test(something:int): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(\"abc\")\n\n assert \"abc is of type str, expecting int.\" in error.value.args", "def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_01_float(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(float(1.2), float(2.2), 1)\n self.assertEqual(\"width must be an integer\", str(x.exception))", "def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")", "def test_check_X_not_int_not_float():\n with pytest.raises(ValueError):\n check_X(['hi'], verbose=False)", "def test_data_type(self):\n self.assertRaises(TypeError, Square, 'hello', 3, 2)\n self.assertRaises(TypeError, Square, 3, True, 2)\n self.assertRaises(TypeError, Square, 3, 2, 3.45)", "def ExceptionPropertyType_test(type1: str, type2: str):\n m = pyflamegpu.ModelDescription(\"model\")\n ed = m.Environment()\n add_func_t1 = getattr(ed, f\"newProperty{type1}\")\n add_func_array_t1 = getattr(ed, f\"newPropertyArray{type1}\")\n set_func_t1 = getattr(ed, f\"setProperty{type1}\")\n set_func_t2 = getattr(ed, f\"setProperty{type2}\")\n set_func_array_t2 = getattr(ed, f\"setPropertyArray{type2}\")\n \n a_t1 = 1\n a_t2 = 1\n b_t1 = [0] * ARRAY_TEST_LEN\n b_t2 = [0] * ARRAY_TEST_LEN\n for i in range(ARRAY_TEST_LEN):\n b_t1[i] = i\n b_t2[i] = i\n add_func_t1(\"a\", a_t1, True)\n add_func_array_t1(\"b\", b_t1, True)\n \n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func_t2(\"a\", a_t2)\n assert e.value.type() == \"InvalidEnvPropertyType\"\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func_array_t2(\"b\", b_t2)\n assert e.value.type() == \"InvalidEnvPropertyType\"\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func_t2(\"b\", 0, a_t2)\n assert e.value.type() == \"InvalidEnvPropertyType\"", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)" ]
[ "0.6931209", "0.6566379", "0.6381943", "0.63050914", "0.62194294", "0.61516786", "0.6136856", "0.61005616", "0.60737664", "0.60367274", "0.60167736", "0.6010583", "0.6010164", "0.5982661", "0.59672403", "0.59668577", "0.59405553", "0.5933665", "0.5899024", "0.5894452", "0.58893466", "0.5887788", "0.5876288", "0.58660054", "0.5862698", "0.5856714", "0.585536", "0.5847813", "0.57912815", "0.57860976" ]
0.81578225
0
exercising some Letter methods
def test_letter_methods(self): # shift l = get_character("G") self.assertEqual(l.x, 0) self.assertEqual(l.y, 0) l.shift(2, 2) self.assertEqual(l.x, 2) self.assertEqual(l.y, 2) # scale adjusts the scale attributes orig_width = l.scale_x orig_height = l.scale_y l.scale(x=0.5, y=2) self.assertEqual(l.scale_x, orig_width / 2) self.assertEqual(l.scale_y, orig_height * 2) # invert changes the degree attr l.rotate(180) self.assertEqual(l.degrees, 180)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_letters(word, guesses):\n pass", "def letter_for(label):\n return \"ABCDEFGHIJ\"[label]", "def init_letters():\n return ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',\n 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',\n 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',\n 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z')", "def getAlphabet(self):\n return self.alpha", "def getAlphabet(self):\n return self.alpha", "def getAlphabet(self):\n return self.alpha", "def letter(self) -> str:\n my_letter = None\n if self is LieType.A:\n my_letter = \"A\"\n elif self is LieType.B:\n my_letter = \"B\"\n elif self is LieType.C:\n my_letter = \"C\"\n elif self is LieType.D:\n my_letter = \"D\"\n else:\n raise ValueError(\n \"This is not in the enum of Lie types so this should be unreachable\")\n return my_letter", "def test_message_letter(Message, letter):\n assert get_message_letter(Message) == letter", "def letters(m) -> str:\n string = \"\".join(m.letter_list)\n if m[0] == \"sky\":\n string = string.upper()\n return string", "def getLetter(index):\n alphabet = string.ascii_lowercase + \" \"\n return alphabet[index]", "def next_letter(letter):\r\n\tcoded_text = ''\r\n\tstep = 1\r\n\tif letter in ascii_lowercase:\r\n\t\tcoded_text = coded_text + ascii_lowercase[ascii_lowercase.index(letter) + step % len(ascii_lowercase)]\r\n\r\n\tif letter in ascii_uppercase:\r\n\t\tcoded_text = coded_text + ascii_uppercase[ascii_uppercase.index(letter) + step % len(ascii_uppercase)]\r\n\r\n\telse:\r\n\t\tcoded_text += text\r\n\r\n\treturn coded_text", "def get_letter_dict():\n\treturn {\n\t\t'A': 0,\n\t\t'B': 0,\n\t\t'C': 0,\n\t\t'D': 0,\n\t\t'E': 0,\n\t\t'F': 0,\n\t\t'G': 0,\n\t\t'H': 0,\n\t\t'I': 0,\n\t\t'J': 0,\n\t\t'K': 0,\n\t\t'L': 0,\n\t\t'M': 0,\n\t\t'N': 0,\n\t\t'O': 0,\n\t\t'P': 0,\n\t\t'Q': 0,\n\t\t'R': 0,\n\t\t'S': 0,\n\t\t'T': 0,\n\t\t'U': 0,\n\t\t'V': 0,\n\t\t'W': 0,\n\t\t'X': 0,\n\t\t'Y': 0,\n\t\t'Z': 0\n\t}", "def next_letter(letter, step):\r\n\r\n\tif letter in ascii_uppercase:\r\n\t\tnew_letter = get_new_letter(ascii_uppercase, letter, step)\r\n\telif letter in ascii_lowercase:\r\n\t\tnew_letter = get_new_letter(ascii_lowercase, letter, step)\r\n\telse:\r\n\t\tnew_letter = letter\r\n\treturn new_letter", "def index_letter(self, index):\n\t\treturn ALPHABET[index]", "def alphabet_war(fight):", "def test_letter_delimiter(self):\n self.non_default_delimiter_template('a')", "def getletter(variable, letternumber):\n\n # Get the corresponding letter\n return str(variable)[letternumber - 1]", "def letters(string, user_input):\n\n string = upper(string) # pass the string to the upper function to capitalize it\n options = [] # The array where the options given by the user will be stored\n new_string = '' # Empty string to append to\n\n for char in user_input: # Itterate over words in the options passed by the user \n options.append(char) # Append them to the options list\n\n for char in string: # Itterate over each character in the original string\n if char == 'A' and 'A' in options: # If the character is \"A\" and given by the user\n char = '4' # Repalce it with \"4\"\n if char == 'E' and 'E' in options: # If the character is \"E\" and given by the user\n char = '3' # Repalce it with \"3\"\n if char == 'S' and 'S' in options: # If the character is \"A\" and given by the user\n char = '5' # Repalce it with \"5\"\n if char == 'B' and 'B' in options: # If the character is \"B\" and given by the user\n char = '13' # Repalce it 
with \"13\"\n if char == 'O' and 'O' in options: # If the character is \"O\" and given by the user\n char = '0' # Repalce it with \"0\"\n if char == 'I' and 'I' in options: # If the character is \"I\" and given by the user\n char = '1' # Repalce it with \"1\"\n if char == 'V' and 'V' in options: # If the character is \"V\" and given by the user\n char = '\\/' # Repalce it with \"\\/\"\n if char == 'W' and 'W' in options: # If the character is \"W\" and given by the user\n char = '\\/\\/' # Repalce it with \"\\/\\/\"\n new_string += char # Append all the characters to the empty string\n\n return new_string # Return the new string", "def affine_decipher_letter(letter, multiplier=1, adder=0, one_based=True):\n if letter in string.ascii_letters:\n cipher_number = pos(letter)\n if one_based: cipher_number += 1\n plaintext_number = ( \n modular_division_table[multiplier, (cipher_number - adder) % 26]\n )\n if one_based: plaintext_number -= 1\n if letter in string.ascii_uppercase:\n return unpos(plaintext_number).upper()\n else:\n return unpos(plaintext_number) \n else:\n return letter", "def letter(self):\n return self._letter", "def _get_letter_by_code(table: list, first_dig: str, second_dig: str) -> str:\n try:\n if first_dig == '1':\n return table[2][int(second_dig) - 1]\n elif first_dig == '2' or first_dig == '3':\n return table[1][int(second_dig) - 1]\n else:\n return table[0][int(second_dig) - 1]\n except IndexError:\n print(f'Неизвестный символ с кодом {first_dig}{second_dig}')\n return ''", "def get_available_letters():\n available = string.ascii_lowercase\n\n return available", "def getCode1Letter(self):\n dataDict = self.__dict__\n raise ApiError(\"\"\"%s.getCode1Letter:\n getCode1Letter should never be called - must be overridden in subclass\"\"\" % self.qualifiedName\n + \": %s\" % (self,)\n )", "def letter_num(num: int):\n if abs(num) > 26 or num == 0:\n let = ord('a') + 26 - 1\n else:\n let = ord('a') + abs(num) - 1\n return chr(let)", "def say_letter(self, keyboard, keycode, char, modifiers):\n\n if keycode[1] in ('shift', 'rshift'):\n return # ignore.. 
shifted keys will have their Shift modifier set\n elif keycode[1] == 'tab':\n self.play_sound('tab')\n elif keycode[1] == 'delete':\n self.play_sound('delete')\n elif keycode[1] == 'backspace':\n self.textbox.text = self.textbox.text[:-1]\n self.play_sound('backspace')\n elif keycode[1] == 'enter':\n self.textbox.text += '\\n'\n self.play_sound('enter')\n elif char == ' ':\n self.textbox.text += ' '\n self.play_sound('space') \n elif char is None:\n self.play_sound('error')\n else:\n if 'shift' in modifiers or 'rshift' in modifiers:\n self.textbox.text += char.upper()\n else:\n self.textbox.text += char\n if RENAMED_CHAR.get(char):\n self.play_sound(RENAMED_CHAR[char])\n else: \n self.play_sound(char)", "def get_letter_to_code_mappings():\n return {\n \"a\": \"Alfa\", \"b\": \"Bravo\", \"c\": \"Charlie\", \"d\": \"Delta\", \"e\": \"Echo\",\n \"f\": \"Foxtrot\", \"g\": \"Golf\", \"h\": \"Hotel\", \"i\": \"India\", \"j\":\n \"Juliett\", \"k\": \"Kilo\", \"l\": \"Lima\", \"m\": \"Mike\", \"n\": \"November\", \"o\":\n \"Oscar\", \"p\": \"Papa\", \"q\": \"Quebec\", \"r\": \"Romeo\", \"s\": \"Sierra\", \"t\":\n \"Tango\", \"u\": \"Uniform\", \"v\": \"Victor\", \"w\": \"Whiskey\", \"x\": \"Xray\",\n \"y\": \"Yankee\", \"z\": \"Zulu\", \"0\": \"Zero\", \"1\": \"One\", \"2\": \"Two\", \"3\":\n \"Three\", \"4\": \"Four\", \"5\": \"Five\", \"6\": \"Six\", \"7\": \"Seven\", \"8\":\n \"Eight\", \"9\": \"Niner\", \"=\": \"Equals\", \"?\": \"Query\", \"/\": \"Slash\", \",\":\n \"Comma\", \".\": \"Stop\", \":\": \"Colon\", \"'\": \"Apostrophe\", \"-\": \"Dash\",\n \"(\": \"Open\", \")\": \"Close\", \"@\": \"At\",\n }", "def _convert(self, message, get_leter_index):\r\n\t\tord_a = ord('a')\r\n\t\treturn \"\".join(\r\n\t\t\t_nth_letter(get_leter_index(ord(char) - ord_a, ord(key_char) - ord_a))\r\n\t\t\t\tfor char, key_char in zip(message, itertools.cycle(self.key))\r\n\t\t)", "def letter_prob(c):\n if c == ' ': return 0.1904\n if c == 'e' or c == 'E': return 0.1017\n if c == 't' or c == 'T': return 0.0737\n if c == 'a' or c == 'A': return 0.0661\n if c == 'o' or c == 'O': return 0.0610\n if c == 'i' or c == 'I': return 0.0562\n if c == 'n' or c == 'N': return 0.0557\n if c == 'h' or c == 'H': return 0.0542\n if c == 's' or c == 'S': return 0.0508\n if c == 'r' or c == 'R': return 0.0458\n if c == 'd' or c == 'D': return 0.0369\n if c == 'l' or c == 'L': return 0.0325\n if c == 'u' or c == 'U': return 0.0228\n if c == 'm' or c == 'M': return 0.0205\n if c == 'c' or c == 'C': return 0.0192\n if c == 'w' or c == 'W': return 0.0190\n if c == 'f' or c == 'F': return 0.0175\n if c == 'y' or c == 'Y': return 0.0165\n if c == 'g' or c == 'G': return 0.0161\n if c == 'p' or c == 'P': return 0.0131\n if c == 'b' or c == 'B': return 0.0115\n if c == 'v' or c == 'V': return 0.0088\n if c == 'k' or c == 'K': return 0.0066\n if c == 'x' or c == 'X': return 0.0014\n if c == 'j' or c == 'J': return 0.0008\n if c == 'q' or c == 'Q': return 0.0008\n if c == 'z' or c == 'Z': return 0.0005\n return 1.0", "def get_letter(self, vowel_need):\r\n\r\n return self.letters.get(vowel_need, self.vowels)", "def letters():\n letters = \"BINGO\"\n for letter in letters:\n yield letter" ]
[ "0.68726075", "0.6728515", "0.6708404", "0.6602445", "0.6602445", "0.6602445", "0.6595359", "0.65413225", "0.6491447", "0.64886683", "0.6447457", "0.64450705", "0.64275444", "0.63569146", "0.63441706", "0.627932", "0.6252092", "0.62292844", "0.6165212", "0.6158538", "0.61365527", "0.61301607", "0.61140853", "0.60862553", "0.607768", "0.6074581", "0.6072664", "0.60617816", "0.60533845", "0.6041342" ]
0.68069106
1
Helper method to get baseline file.
def get_test_baseline(self, file_name): return os.path.abspath( os.path.join( os.path.abspath(__file__), u'..', u'baselines', file_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _GetBaseline(self, filename, directory, upstream_only = False):\n\n local_filename = os.path.join(directory, filename)\n local_directory = local_filename[:local_filename.rfind(\"/\")]\n if upstream_only:\n last_index = local_filename.rfind(\".\")\n if last_index > -1:\n local_filename = (local_filename[:last_index] +\n UPSTREAM_IMAGE_FILE_ENDING)\n\n download_file_modifiers = \"\"\n if local_filename.endswith(\".png\"):\n download_file_modifiers = \"b\" # binary file\n\n if not self.dont_download:\n CreateDirectory(local_directory)\n\n local_baseline = None\n url_of_baseline = None\n\n if self.use_local_baselines:\n test_path_key = self._NormalizeBaselineIdentifier(filename)\n dict = self.baseline_dict\n if upstream_only:\n dict = self.webkit_baseline_dict\n if test_path_key in dict:\n local_baseline = dict[test_path_key]\n url_of_baseline = local_baseline\n shutil.copy(local_baseline, local_directory)\n elif self.verbose:\n print \"Baseline %s does not exist in the index.\" % test_path_key\n else:\n index = 0\n possible_files = self._GetPossibleFileList(filename, upstream_only)\n # Download the baselines from the webkit.org site.\n while local_baseline == None and index < len(possible_files):\n local_baseline = self._DownloadFile(possible_files[index],\n local_filename,\n download_file_modifiers,\n True)\n if local_baseline:\n url_of_baseline = possible_files[index]\n index += 1\n\n if not local_baseline:\n if self.verbose:\n print \"Could not find any baseline for %s\" % filename\n else:\n local_baseline = os.path.normpath(local_baseline)\n if local_baseline and self.verbose:\n print \"Found baseline: %s\" % url_of_baseline\n\n return BaselineCandidate(local_baseline, url_of_baseline)", "def baseline(self):\n return self.data[self.data['treatment'] == 'Baseline']", "def read_base_test(base_file):\n with open(base_file) as f:\n contents = f.read()\n return contents", "def findBaseline(filename, projectSource):\n status = False \n filePath = checkPath(filename, projectSource)\n baseFileRead = open(filePath, \"r\") \n for line in baseFileRead.readlines():\n if re.search(\"Objects \\*{25}\", line) != None:\n status = True\n if status == False:\n sys.stderr.write(\"Warning: Expected Base file content not found\")\n baseFileRead.close()\n return filePath", "def _load_baseline(lang: str='en', model_name_or_path: Optional[str]=None, baseline_path: Optional[str]=None, baseline_url: Optional[str]=None) ->Optional[Tensor]:\n if baseline_path:\n baseline: Optional[Tensor] = _read_csv_from_local_file(baseline_path)\n elif baseline_url:\n baseline = _read_csv_from_url(baseline_url)\n elif lang and model_name_or_path:\n _URL_BASE = 'https://raw.githubusercontent.com/Tiiiger/bert_score/master/bert_score/rescale_baseline'\n baseline_url = f'{_URL_BASE}/{lang}/{model_name_or_path}.tsv'\n baseline = _read_csv_from_url(baseline_url)\n else:\n baseline = None\n warn('Baseline was not successfully loaded. 
No baseline is going to be used.')\n return baseline", "def GetBaseFile(self, filename):\r\n\r\n raise NotImplementedError(\r\n \"abstract method -- subclass %s must override\" % self.__class__)", "def GetBaseFile(self, filename):\n\n raise NotImplementedError(\n \"abstract method -- subclass %s must override\" % self.__class__)", "def _read_baseline(self, path):\n base_rmsd = dict()\n fin = open(path,'r')\n for line in fin:\n if line == '\\s' or line == '' or line == '\\n':\n continue\n k, v = line.split()\n base_rmsd[k.strip()] = float(v.strip())\n return base_rmsd", "def bbl_file(self, base_file):\n bbl_path = os.path.abspath(os.path.splitext(base_file)[0]) + '.bbl'\n return self.open_encode_safe(bbl_path).readlines()", "def get_baseline_output_id(self) -> int:\n pass", "def base_filename(self):\n return self.filename.split('.')[0]", "def __file__(self):\n\t\treturn __file__", "def __file__(self):\n\t\treturn __file__", "def __file__(self):\n\t\treturn __file__", "def getBaseSrcFile(self) -> List[int]:\n ...", "def __file__(self):\n return __file__", "def baseline(self):\n if getattr(self, \"_baseline\", None) is None:\n self._baseline = (self.container.height - 1) / 2\n return self._baseline", "def base(self):\n return os.path.basename(self.path)", "def _getfilename(self):\n pass", "def get_iaq_baseline(self) -> List[int]:\n # name, command, signals, delay\n return self._run_profile((\"iaq_get_baseline\", [0x20, 0x15], 2, 0.01))", "def _getBaselineThresh(self):\n print('Calculating 10% baseline')\n self.baseline = obrienBaseline.obrienBaseline(\n self.d['dos1rate'], timeWidth=5.0, \n cadence=0.1)\n self.peak_std = ( (self.d['dos1rate'][self.peakInd]/10 - \n self.baseline[self.peakInd]/10)/ \n np.sqrt(self.d['dos1rate'][self.peakInd]/10))\n return", "def baseline_TVOC(self) -> int:\n return self.get_iaq_baseline()[1]", "def get_base_path(self) -> str:\n raise NotImplementedError()", "def get_source_file(self):\n return self.get_attribute(\"source_file\")", "def get_inifile(self):\n return self.inifile", "def get_ap_file(self):\n with open(self.trendfile, 'r') as readfile:\n data = json.load(readfile)\n return data['trendtable']", "def _get_filename():\n dirname = os.path.dirname(__file__)\n return os.path.join(dirname, 'occulttraining.txt')", "def __init__(self, filepath, baseline_name=BASELINE_FILE_NAME,\n filename=FILE_NAME, sway_name=FILE_NAME_S):\n self.filepath = filepath\n self.baseline_name = baseline_name\n self.filename = filename\n self.sway_name = sway_name\n self.XSCALE = 22.5\n self.YSCALE = 13.\n self.lim_X = 20\n self.lim_Y = 20\n self.get_baseline_points()", "def test_get_result_top_file(self):\n pass", "def getCurrentFilePath(self):\n return os.path.abspath(self.filePath)" ]
[ "0.72251785", "0.66584283", "0.65599346", "0.6401578", "0.6180834", "0.60123277", "0.5969603", "0.5948323", "0.59022737", "0.5869274", "0.583606", "0.5801047", "0.5801047", "0.5801047", "0.57897425", "0.57875085", "0.56959826", "0.56877124", "0.56490695", "0.56345946", "0.56341195", "0.56069237", "0.5555994", "0.554183", "0.5504578", "0.5499079", "0.54968023", "0.5479849", "0.54668957", "0.54073495" ]
0.79847074
0
Return a list of all the cells in the grid. We start increasing x first, i.e. the 0th cell is the first cell, the 1st cell is the one with the next x in the list and y unchanged, and so on. Returns an array of size n_cells x n_dims.
def cells_list(self): xx, yy = np.meshgrid(self.x_spacings, self.y_spacings) return np.vstack([yy.ravel(), xx.ravel()]).transpose()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_cells_from_dims(num_verts_x: int, num_verts_y: int):\n num_cells_x = num_verts_x - 1\n num_cells_y = num_verts_y - 1\n num_cells = num_cells_x*num_cells_y\n cell_array = np.zeros((num_cells, 4), dtype=int)\n cell_num = 0\n\n # I am sure this could be done in a more efficient way.\n for y_cell in range(num_cells_y):\n for x_cell in range(num_cells_x):\n cell_array[cell_num, 0] = x_cell + num_verts_x*y_cell\n cell_array[cell_num, 1] = cell_array[cell_num, 0] + 1\n cell_array[cell_num, 2] = cell_array[cell_num, 0] + num_verts_x + 1\n cell_array[cell_num, 3] = cell_array[cell_num, 0] + num_verts_x\n cell_num += 1\n\n return cell_array", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n part = Partition(list(self))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_cells(self):\n self.list = [self.origin]\n\n for i in range(1, self.size):\n if(self.direction ==self.direction.RIGHT):\n self.list.append((self.origin[0], self.origin[1]+i))\n elif(self.direction ==self.direction.DOWN):\n self.list.append((self.origin[0]-i, self.origin[1]))\n\n return self.list", "def cell_list(self):\n lst_of_idx = []\n height = self.__height\n width = self.__width\n for i in range(width):\n for j in range(height):\n lst_of_idx.append((i,j))\n lst_of_idx.append((3,7))\n return lst_of_idx", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_cells(self):\n cell_list = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n cell_list.append(current_cell)\n return cell_list", "def makeStartingGrid(self):\n return util.make2DArray(self.xN, self.yN, False)", "def cells(self) -> List[Tuple[int, int]]:\n return self._cells", "def traverse_grid(self, start_cell, direction, num_steps):\n elements = []\n\n for step in range(num_steps):\n row = start_cell[0] + step * direction[0]\n col = start_cell[1] + step * direction[1]\n elements.append(self._grid[row][col])\n\n return elements", "def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_adjcells(self,cell):\n adj_cells = []\n cells_xy = []\n if cell.x > 0:\n adj_cells.append(self.cell_array.item((cell.x-1,cell.y)))\n if cell.x < self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x+1,cell.y)))\n if cell.y > 0:\n adj_cells.append(self.cell_array.item((cell.x,cell.y-1)))\n if cell.y < 
self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x,cell.y+1)))\n return adj_cells", "def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]", "def cells(self):\n return copy.deepcopy(self._cells)", "def GLDAS025Cellgrid():\n return GLDAS025Grids(only_land=False)", "def generate_cells(self,n_c):\n self.n_c = n_c\n self.cells = [Cell() for i in range(n_c)]\n return self.cells", "def generate_grid(nrows, ncols, ndots):\n\n # Validation\n if nrows * ncols < ndots:\n raise Exception(\"ndots must be <= than grid size\")\n\n rows = np.arange(1, nrows + 1)\n cols = np.arange(1, ncols + 1)\n\n # Create empty matrix\n grid = np.empty((len(rows), len(cols), 2), dtype=np.intp)\n grid[..., 0] = rows[:, None]\n grid[..., 1] = cols \n\n return grid.reshape(nrows * ncols, -1)[:ndots]", "def cells(self):\n return ((row, col) for row in self.rows for col in self.cols)", "def get_start_grid(cols=4, rows=4):\n grid = [[0]*cols for i in range(rows)]\n for i in range(2):\n empties = get_empty_cells(grid)\n y,x = random.choice(empties)\n grid[y][x] = 2 if random.random() < 0.9 else 4\n return grid", "def get_cells(pts, inv_cell_width, Ny, Nx, log=sys.stdout):\n lib = _initlib()\n p = require(pts, dtype=float64, requirements=['C']) \n npts = p.shape[0]\n assert(p.shape ==(npts,3))\n out = empty(npts, dtype=int64)\n\n res = lib.find_lattice(p, npts, inv_cell_width, Ny, Nx, out)\n return out", "def get_visible_cells(self):\r\n ux, uy = self.GetScrollPixelsPerUnit()\r\n sx, sy = self.GetViewStart()\r\n w, h = self.GetGridWindow().GetClientSize().Get()\r\n sx *= ux\r\n sy *= uy\r\n start_col = self.XToCol(sx)\r\n start_row = self.YToRow(sy)\r\n end_col = self.XToCol(sx + w, True)\r\n end_row = self.YToRow(sy + h, True)\r\n return start_row, end_row, start_col, end_col", "def regex_grid(n):\n cx = 2 ** (n - 1)\n cy = 2 ** (n - 1)\n grid = [[grid_numbering(n, i , j, cx, cy) for i in range(2 ** n)] for j in range(2 ** n)]\n \n return grid", "def getCellRange(self, cellx, celly, size):\n y = int(celly - ((size -1) / 2))\n x = int(cellx - ((size -1) / 2))\n _y = int(celly + ((size -1) / 2))\n _x = int(cellx + ((size -1) / 2))\n return list(product(range(x, _x+1), range(y,_y+1)))", "def make_grid(self):\n\n\t\tinit_grid = (self.grid_width//2, self.grid_height//2)\n\t\tgrid_list = []\n\n\t\tfor i in range(self.canv_width//self.grid_width):\n\t\t\tfor j in range(self.canv_height//self.grid_height):\n\t\t\t\tif j == 0 or j%2 ==0:\n\t\t\t\t\tgrid_list.append((init_grid[0]+i*self.grid_width, init_grid[1]+j*self.grid_height))\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tgrid_list.append((grid_list[-1][0]+(self.grid_width//2), init_grid[1]+j*self.grid_height))\n\n\t\treturn grid_list", "def _create_grid_with_cells(self, width, height):\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n if column % 2 == 1 and row % 2 == 1:\n grid[row].append(TILE_EMPTY)\n elif (\n column == 0 or row == 0 or column == width - 1 or row == height - 1\n ):\n grid[row].append(TILE_CRATE)\n else:\n grid[row].append(TILE_CRATE)\n grid[-2][-3] = TILE_EMPTY\n grid[1][0] = 
TILE_EMPTY\n return grid", "def get_start_grid(cols=4, rows=4):\n\tgrid = [[\"\"]*cols for i in range(rows)]\n\tfor i in range(2):\n\t\tempties = get_empty_cells(grid)\n\t\ty,x = random.choice(empties)\n\t\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn grid", "def create_grid(self):\n return [[0] * self.width for _ in range(self.height)]" ]
[ "0.7313377", "0.70912474", "0.70496106", "0.70136446", "0.70017177", "0.69983876", "0.6967043", "0.6942949", "0.69047695", "0.6899912", "0.6888806", "0.6857896", "0.68333375", "0.6820522", "0.6802178", "0.6766183", "0.66904247", "0.66788596", "0.6637702", "0.66127455", "0.6606392", "0.66014403", "0.659337", "0.65744764", "0.65483505", "0.6543926", "0.65347534", "0.6508396", "0.64976", "0.64827985" ]
0.77503717
0
Wrapper to run Praat's 'To TextGrid (silences)' function.
def detect_silences(sound, sil_threshold, sil_duration): textgrid = call(sound, 'To TextGrid (silences)', 100, 0.0, sil_threshold, sil_duration, 0.1, 'silence', 'speech') return textgrid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__ (self,win,text='Press a key to continue',**kwargs):\n\n self.win = win\n \n self.text = visual.TextStim(win,text=text,**kwargs)", "def __init__ (self,win,text='Press a key to continue',**kwargs):\n\n self.win = win\n \n self.text = visual.TextStim(win,text=text,**kwargs)", "def make_silence_phones_txt(self):\n raise NotImplementedError", "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text", "def wrapper(scr, text: list):\n\n display = Display(scr, text)\n display.run()", "async def translate(context, arguments, style):\n large_font_spaces = 5\n small_font_spaces = int(27 / 40 * large_font_spaces)\n large_font = True\n\n text = arguments[0]\n runes = \"\"\n\n rune_count = 0\n for i, char in enumerate(text.lower()):\n if char in default_values.RUNES[style]:\n runes += default_values.RUNES[style][char]\n rune_count += 1\n elif char in default_values.RUNES[\"archaic\"]:\n runes += default_values.RUNES[\"archaic\"][char]\n rune_count += 1\n else:\n runes += text[i]\n if char != \" \":\n large_font = False\n\n if not context.desktop_ui or not large_font or rune_count > 27:\n runes = runes.replace(\" \", \" \" * small_font_spaces)\n else:\n runes = runes.replace(\" \", \" \" * large_font_spaces)\n\n await context.message.channel.send(runes)\n return True", "def do(text):\n return freeling_stemming(text)", "def cut_item_texts(self, arrays=None):\n if not arrays: arrays = self.masks()\n for a in arrays:\n for item in self.sources(a):\n i = self._meta['columns'][item]\n for tk in self.valid_tks:\n text = self.text(item, True, tk)\n if text: i['text'][tk] = text\n for ed in ['x', 'y']:\n if i['text'].get('{} edits'.format(ed)):\n for tk in self.valid_tks:\n text = self.text(item, True, tk, ed)\n if text: i['text']['{} edits'.format(ed)][tk] = text\n return None", "def apply(self, text):", "def text(text = 'abcd', size = 10, justify = 'left', layer = 0, font = \"DEPLOF\"):\n t = Device('text')\n xoffset = 0\n yoffset = 0\n\n face = font\n if face == \"DEPLOF\":\n scaling = size/1000\n\n for line in text.split('\\n'):\n l = Device(name = 'textline')\n for c in line:\n ascii_val = ord(c)\n if c == ' ':\n xoffset += 500*scaling\n elif (33 <= ascii_val <= 126) or (ascii_val == 181):\n for poly in _glyph[ascii_val]:\n xpts = np.array(poly)[:, 0]*scaling\n ypts = np.array(poly)[:, 1]*scaling\n l.add_polygon([xpts + xoffset, ypts + yoffset],\n layer = layer)\n xoffset += (_width[ascii_val] + _indent[ascii_val])*scaling\n else:\n valid_chars = '!\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~µ'\n warnings.warn('[PHIDL] text(): Warning, some characters ignored, no geometry for character \"%s\" with ascii value %s. 
' \\\n 'Valid characters: %s' % (chr(ascii_val), ascii_val, valid_chars))\n t.add_ref(l)\n yoffset -= 1500*scaling\n xoffset = 0\n else:\n from .font import _get_font_by_name, _get_font_by_file, _get_glyph\n\n # Load the font\n # If we've passed a valid file, try to load that, otherwise search system fonts\n font = None\n if (face.endswith(\".otf\") or face.endswith(\".ttf\")) and os.path.exists(face):\n font = _get_font_by_file(face)\n else:\n try:\n font = _get_font_by_name(face)\n except ValueError:\n pass\n if font is None:\n raise ValueError(('[PHIDL] Failed to find font: \"%s\". ' +\n 'Try specifying the exact (full) path to the .ttf or .otf file. ' +\n 'Otherwise, it might be resolved by rebuilding the matplotlib font cache') % (face))\n\n # Render each character\n for line in text.split('\\n'):\n l = Device('textline')\n xoffset = 0\n for letter in line:\n letter_dev = Device(\"letter\")\n letter_template, advance_x = _get_glyph(font, letter)\n for poly in letter_template.polygons:\n letter_dev.add_polygon(poly.polygons, layer=layer)\n ref = l.add_ref(letter_dev)\n ref.move(destination=(xoffset, 0))\n ref.magnification = size\n xoffset += size*advance_x\n\n ref = t.add_ref(l)\n ref.move(destination=(0, yoffset))\n yoffset -= size\n\n justify = justify.lower()\n for l in t.references:\n if justify == 'left': pass\n if justify == 'right': l.xmax = 0\n if justify == 'center': l.move(origin = l.center,\n destination = (0, 0), axis = 'x')\n\n t.flatten()\n return t", "def obtain_text():\n pass", "async def aesthetic(self, ctx, *, text):\n out = \"\"\n for char in text:\n out += utils.fullwidth_transform.get(char, char)\n await ctx.send(out)", "def FlashyText(win,center,text,timing):\n winner = Text(center,text)\n winner.setFace(\"arial\")\n winner.setFill(\"black\")\n winner.setSize(30)\n for i in range(1,6):\n time.sleep(timing)\n if i % 2 == 0:\n winner.draw(win)\n else:\n winner.undraw()", "async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")", "def args_batch_to_text(args_batch: ArgsBatch) -> Text:\n lines = []\n for args in args_batch:\n lines.append('; '.join(str(a) for a in args))\n return '\\n'.join(lines)", "def setText(*args):", "def draw_text(self, text, i, j, **params):", "def preprocess(self, text):\r\n return text", "def rich(text):\n return full(text, False)", "def mytext(x,y,s,**kwargs):\n # we take care of this one\n model = kwargs.pop('model', None)\n if model:\n th = text(x,y,model,**kwargs)\n draw()\n x0,y0,w,h = th.get_window_extent().bounds\n gca().texts.remove(th)\n x = x0\n y = y0\n kwargs['transform'] = matplotlib.transforms.IdentityTransform()\n kwargs['horizontalalignment'] = 'left'\n kwargs['verticalalignment'] = 'baseline'\n# print x,y,kwargs\n return text(x,y,s,**kwargs)", "def get_text(downgrade_titles=False):", "def text_grid(self, text, clear_screen=True, x=0, y=0, text_color='black', font=None):\n\n assert 0 <= x < Display.GRID_COLUMNS,\\\n \"grid columns must be between 0 and %d, %d was requested\" %\\\n ((Display.GRID_COLUMNS - 1, x))\n\n assert 0 <= y < Display.GRID_ROWS,\\\n \"grid rows must be between 0 and %d, %d was requested\" %\\\n ((Display.GRID_ROWS - 1), y)\n\n return self.text_pixels(text, clear_screen, x * Display.GRID_COLUMN_PIXELS, y * Display.GRID_ROW_PIXELS,\n text_color, font)", "def format_text(text: 
TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def preprocess_text(self, seq):\n if self.text_preprocess_fn:\n seq = list(map(self.text_preprocess_fn, seq))\n return seq", "def textile(text, **kwargs):\n from django.contrib.markup.templatetags.markup import textile\n return textile(text)", "def basic(text):\n lines = text.split(\"\\n\")\n result = []\n\n for line in lines:\n result.append(_inline(line))\n\n return \"\\n\".join(result)", "def getKernels(indices):\n\n\t\t\ti = indices[0]\n\t\t\tj = indices[1]\n\n\t\t\th = i - self.scope\n\t\t\tk = j + self.scope\n\n\t\t\tif h < 0: h = 0\n\t\t\tif k > len(text): k = len(text)-1\n\n\t\t\treturn text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")\n\t\t\t#return \"|\"+text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\", text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), \"|\"+text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\"", "def textCurves(*args, font: AnyStr=\"\", name: AnyStr=\"\", object: bool=True, text: AnyStr=\"\",\n q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def generate(self, path, language, textgrid):\n activations = []\n self.model.eval()\n iterator = tokenize(path, language, path_like=True, train=False)\n if self.generation == 'bucket':\n # Here, we give as input the text line by line.\n for line in iterator:\n line = line.strip() # Remove trailing characters\n\n line = '[CLS] ' + line + ' [SEP]'\n tokenized_text = self.tokenizer.wordpiece_tokenizer.tokenize(line)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n segment_ids = [1 for x in tokenized_text]\n mapping = utils.match_tokenized_to_untokenized(tokenized_text, line)\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segment_ids])\n\n with torch.no_grad():\n encoded_layers = self.model(tokens_tensor, segments_tensors) # last_hidden_state, pooled_last_hidden_states, all_hidden_states\n # filtration\n encoded_layers = np.vstack(encoded_layers[2][1:]) # retrieve all the hidden states (dimension = layer_count * len(tokenized_text) * feature_count)\n encoded_layers = encoded_layers[self.loi, :, :]\n activations += utils.extract_activations_from_tokenized(encoded_layers, mapping)\n elif self.generation == 'sequential':\n # Here we give as input the sentence up to the actual word, incrementing by one at each step.\n for line in iterator:\n for index in range(1, len(line.split())):\n tmp_line = \" \".join(line.split()[:index])\n tmp_line = tmp_line.strip() # Remove trailing characters\n\n tmp_line = '[CLS] ' + tmp_line + ' [SEP]'\n tokenized_text = self.tokenizer.wordpiece_tokenizer.tokenize(tmp_line)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n segment_ids = [1 for x in tokenized_text]\n mapping = utils.match_tokenized_to_untokenized(tokenized_text, line)\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segment_ids])\n\n with torch.no_grad():\n encoded_layers = self.model(tokens_tensor, segments_tensors) # dimension = layer_count * len(tokenized_text) * feature_count\n # filtration\n encoded_layers = np.vstack(encoded_layers[2][1:])\n encoded_layers = encoded_layers[self.loi, :, :]\n 
activations.append(utils.extract_activations_from_tokenized(encoded_layers, mapping)[-1])\n result = pd.DataFrame(np.vstack(activations), columns=['layer-{}-{}'.format(layer, index) for layer in self.loi for index in range(self.FEATURE_COUNT)])\n return result", "def tokenize(self, text, **kwargs):\n if self.opt['tracker'] == 'babi6':\n text = babi6_dirty_fix(text)\n text = text.replace('<SILENCE>', '_SILENCE_')\n\n return [t.text for t in NLP.tokenizer(text)]" ]
[ "0.5339875", "0.5339875", "0.52565366", "0.5225016", "0.5224651", "0.52221847", "0.52040446", "0.5149263", "0.50935054", "0.509346", "0.50882745", "0.50577587", "0.50410724", "0.50378585", "0.5034093", "0.5024333", "0.5018697", "0.50046563", "0.4991087", "0.49812207", "0.49809837", "0.49805796", "0.49801278", "0.49495757", "0.49275097", "0.4912974", "0.49064887", "0.48921722", "0.4890334", "0.48809183" ]
0.59195006
0
Saves chunked speech intervals as WAV file.
def save_chunks(chunk_sound, out_path, video_id): chunk_start_ms = int(chunk_sound.get_start_time()*1000) chunk_end_ms = int(chunk_sound.get_end_time()*1000) chunk_duration = chunk_end_ms - chunk_start_ms chunk_fn = '{0}_{1}_{2}.wav'.format(video_id, chunk_start_ms, chunk_end_ms) chunk_file_path = path.join(out_path, chunk_fn) chunk_sound.save(chunk_file_path, 'WAV') return {'filename': chunk_fn, 'video_id': video_id, 'start_time': chunk_start_ms, 'end_time': chunk_end_ms, 'duration': chunk_duration}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def save_audio(self, name=DEFAULT_OUT_NAME):\n print(\"Saving...\")\n wf = wave.open(name+'.wav', 'wb')\n wf.setnchannels(DEFAULT_CHANNELS)\n wf.setsampwidth(self.audio.get_sample_size(DEFAULT_FORMAT))\n wf.setframerate(DEFAULT_RATE)\n wf.writeframes(b''.join(self.frames))\n wf.close()\n print('Saved')", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def save_frames_to_wav_file(frames: np.ndarray, sample_rate: int, file_path: str):\n wavfile.write(file_path, sample_rate, np.hstack(frames))", "def write_wav(fname, samps, sampling_rate=16000, normalize=True):\n\t# for multi-channel, accept ndarray [Nsamples, Nchannels]\n\tif samps.ndim != 1 and samps.shape[0] < samps.shape[1]:\n\t\tsamps = np.transpose(samps)\n\t\tsamps = np.squeeze(samps)\n\t# same as MATLAB and kaldi\n\tif normalize:\n\t\tsamps = samps * MAX_INT16\n\t\tsamps = samps.astype(np.int16)\n\tfdir = os.path.dirname(fname)\n\tif fdir and not os.path.exists(fdir):\n\t\tos.makedirs(fdir)\n\t# NOTE: librosa 0.6.0 seems could not write non-float narray\n\t# so use scipy.io.wavfile instead\n\twavfile.write(fname, sampling_rate, samps)", "def save(self, fname, master_volume=1.):\n \n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n )\n\n # normalisation for conversion to int32 bitdepth wav\n norm = master_volume * (pow(2, 31)-1) / vmax\n\n # setup array to house wav stream data \n chans = np.zeros((self.out_channels['0'].values.size,\n len(self.out_channels)), dtype=\"int32\")\n \n # normalise and collect channels into a list\n for c in range(len(self.out_channels)):\n vals = self.out_channels[str(c)].values\n chans[:,c] = (vals*norm).astype(\"int32\")\n \n # finally combine and write out wav file\n wavfile.write(fname, self.samprate, chans)\n print(f\"Saved {fname}\")", "def _save_wav(buff, data, rate) -> None:\n # Code inspired from `IPython.display.Audio`\n data = np.array(data, dtype=float)\n\n bit_depth = 16\n max_sample_value = int(2**(bit_depth - 1)) - 1\n\n num_channels = data.shape[1] if len(data.shape) > 1 else 1\n scaled = np.int16(data / np.max(np.abs(data)) * max_sample_value)\n # The WAVE spec expects little-endian integers of \"sampwidth\" bytes each.\n # Numpy's `astype` accepts array-protocol type strings, so we specify:\n # - '<' to indicate little endian\n # - 'i' to specify signed integer\n # - the number of bytes used to represent each integer\n # See: https://numpy.org/doc/stable/reference/arrays.dtypes.html\n encoded_wav = scaled.astype(f'<i{bit_depth // 8}', copy=False).tobytes()\n\n with wave.open(buff, mode='wb') as waveobj:\n waveobj.setnchannels(num_channels)\n waveobj.setframerate(rate)\n waveobj.setsampwidth(bit_depth // 8)\n waveobj.setcomptype('NONE', 'NONE')\n waveobj.writeframes(encoded_wav)", "def write_wav(self, full_out_file = None):\n\n if full_out_file is None:\n \n (out_file, out_dir) = misc.save_file(FilterSpec='*.wav', DialogTitle='Write sound to ...', \n DefaultName='')\n full_out_file = os.path.join(out_dir, out_file)\n if full_out_file is None:\n print('Output discarded.')\n return 0\n else:\n full_out_file = os.path.abspath(full_out_file)\n (out_dir , out_file) = os.path.split(full_out_file)\n\n write(str(full_out_file), int(self.rate), 
self.data)\n print('Sounddata written to ' + out_file + ', with a sample rate of ' + str(self.rate))\n print('OutDir: ' + out_dir)\n \n return full_out_file", "def save_sound(filename,sound,sample_freq,num_channels):\n #open a wave file in write ('w') mode, this will create the file\n file=wave.open(filename,'w')\n #set the framerate aka sample frequency\n file.setframerate(sample_freq)\n #set the number of the channels\n file.setnchannels(num_channels)\n #the size of the one sample in bytes\n file.setsampwidth(2)\n #write the actual sound to the file, notice the call to get_raw\n file.writeframesraw(sound.get_raw())\n file.close()", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def save_wav(filename, samples, rate=16000, width=2, channels=1):\n wav = wave.open(filename, 'wb')\n wav.setnchannels(channels)\n wav.setsampwidth(width)\n wav.setframerate(rate)\n wav.writeframes(samples)\n wav.close()", "def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")", "def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)", "def write_data_to_wav(self, file_name: str, data):\r\n # apply scale and convert to int16\r\n data = np.int16(data/np.max(np.abs(data)) * self.wav_scale)\r\n # write to file\r\n write(file_name, self.audio_sample_rate, data)\r\n print('Sound ', file_name, ' has been saved')", "def save_stereo(self, fname, master_volume=1.):\n\n if len(self.out_channels) > 2:\n print(\"Warning: sonification has > 2 channels, only first 2 will be used. 
See 'save_combined' method.\")\n \n # first pass - find max amplitude value to normalise output\n # and concatenate channels to list\n vmax = 0.\n channels = []\n for c in range(min(len(self.out_channels), 2)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) / master_volume\n channels.append(self.out_channels[str(c)].values)\n \n wav.write(fname, \n np.column_stack(channels),\n self.samprate, \n scale = (-vmax,vmax),\n sampwidth=3)\n \n print(\"Saved.\")", "def export_wav(self, folder, name_fmt=\"{:02d}.wav\", dtype=np.int16):\n data = np.atleast_2d(self.in_time)\n\n assert data.ndim == 2\n assert np.all(np.abs(data) <= 1.0)\n\n # convert and scale to new output datatype\n if dtype in [np.uint8, np.int16, np.int32]:\n lim_orig = (-1.0, 1.0)\n lim_new = (np.iinfo(dtype).min, np.iinfo(dtype).max)\n data = _rescale(data, lim_orig, lim_new).astype(dtype)\n elif dtype != np.float32:\n raise TypeError(f\"dtype {dtype} is not supported by scipy.wavfile.write.\")\n\n path = Path(folder)\n if not path.is_dir():\n path.mkdir(parents=True, exist_ok=False)\n\n for i in range(data.shape[0]):\n wavfile.write(path / name_fmt.format(i + 1), self.fs, data[i])", "def write(f, sr, x, normalized=False):\n channels = 2 if (x.ndim == 2 and x.shape[1] == 2) else 1\n if normalized: # normalized array - each item should be a float in [-1, 1)\n y = np.int16(x * 2 ** 15)\n else:\n y = np.int16(x)\n song = pydub.AudioSegment(y.tobytes(), frame_rate=sr, sample_width=2, channels=channels)\n song.export(f, format=\"mp3\", bitrate=\"64k\")", "def write_timeline_to_wav(output_path, data, sample_rate):\n\n scipy.io.wavfile.write(output_path, sample_rate, data.T)", "def save_combined(self, fname, ffmpeg_output=False, master_volume=1.):\n # setup list to house wav stream data \n inputs = [None]*len(self.out_channels)\n\n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) / master_volume\n \n print(\"Creating temporary .wav files...\")\n \n for c in range(len(self.out_channels)):\n tempfname = f\"./.TEMP_{c}.wav\"\n wav.write(tempfname, \n self.out_channels[str(c)].values,\n self.samprate, \n scale = (-vmax,vmax),\n sampwidth=3)\n inputs[self.channels.forder[c]] = ff.input(tempfname)\n \n print(\"Joning temporary .wav files...\")\n (\n ff.filter(inputs, 'join', inputs=len(inputs), channel_layout=self.channels.setup)\n .output(fname)\n .overwrite_output()\n .run(quiet=~ffmpeg_output)\n )\n \n print(\"Cleaning up...\")\n for c in range(len(self.out_channels)):\n os.remove(f\"./.TEMP_{c}.wav\")\n \n print(\"Saved.\")", "def save_wavetables(self, path: str, filename_prefix: str = '') -> None:\n for i in range(len(self.wavetables)):\n if not os.path.exists(path):\n os.mkdir(path)\n location = os.path.join(path, filename_prefix + f'{i:02d}.wav')\n wav_file = WavFile(location)\n wav_file.write_samples(self.wavetables[i])", "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def save(self, name):\n try:\n os.mkdir(os.path.join(self.dbpath, name))\n except:\n pass\n\n wf = wave.open(os.path.join(self.dbpath, name, str(uuid.uuid4()) + \".wav\"), 'wb')\n wf.setnchannels(self.CHANNELS)\n wf.setsampwidth(self.p.get_sample_size(self.FORMAT))\n wf.setframerate(self.RATE)\n wf.writeframes(b''.join(list(self.frames)))\n 
wf.close()", "def export_wav(\n filename_wav: Path,\n tradb: vae.io.TraDatabase,\n channel: int,\n time_start: Optional[float] = None,\n time_stop: Optional[float] = None,\n decimation_factor: int = 1,\n):\n y, fs = tradb.read_continuous_wave(\n channel=channel,\n time_start=time_start,\n time_stop=time_stop,\n time_axis=False,\n show_progress=False,\n raw=True, # read as ADC values (int16)\n )\n\n if decimation_factor > 1:\n y = signal.decimate(y, decimation_factor).astype(np.int16)\n fs //= decimation_factor\n\n wavfile.write(filename_wav, fs, y)", "def save_secured_song_to_wave(self, file_location):\n protected_wav = wave.open(os.path.abspath(file_location), 'wb')\n protected_wav.setnchannels(self.original_song.getnchannels())\n protected_wav.setsampwidth(self.original_song.getsampwidth())\n protected_wav.setframerate(self.original_song.getframerate())\n protected_wav.writeframes(self.metadata)\n\n for val in self.full_song:\n protected_wav_val = struct.pack('<h', val)\n protected_wav.writeframesraw(protected_wav_val)\n\n protected_wav.close()", "def wavwrite(y, fs, filename):\n \n x = copy.deepcopy(y) # copy array\n x *= INT16_FAC # scaling floating point -1 to 1 range signal to int16 range\n x = np.int16(x) # converting to int16 type\n wavfile.write(filename, fs, x)", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def wavwrite(y, fs, filename):\n\n\tx = copy.deepcopy(y) # copy array\n\tx *= INT16_FAC # scaling floating point -1 to 1 range signal to int16 range\n\tx = np.int16(x) # converting to int16 type\n\twrite(filename, fs, x)", "def save_to_file(\n sources,\n codec='wav', audio_adapter=ffmpeg.FFMPEGProcessAudioAdapter(),\n bitrate='128k', synchronous=True):\n\n # filename = \"chengdu.mp3\"\n pool = Pool()\n tasks = []\n for instrument, data in sources.items():\n path = \"./out/\"+instrument + \".\" + codec\n\n if pool:\n task = pool.apply_async(audio_adapter.save, (\n path,\n data,\n 44100,\n codec,\n bitrate))\n tasks.append(task)\n else:\n audio_adapter.save(path, data, 44100, codec, bitrate)\n if synchronous and pool:\n while len(tasks) > 0:\n task = tasks.pop()\n task.get()\n task.wait(timeout=200)", "def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)", "def wavwrite(fname, Fs, xt):\n # convert to np.int16 data type\n xt = np.array((2**15-1)*xt, np.int16)\n sio_wav.write(fname, Fs, xt)" ]
[ "0.6919104", "0.6847627", "0.672993", "0.6697336", "0.66271955", "0.6598934", "0.65593815", "0.65480053", "0.65218306", "0.6511665", "0.64606607", "0.6440448", "0.63984156", "0.6338785", "0.63294864", "0.6315476", "0.63118845", "0.63059294", "0.6304708", "0.6291267", "0.62757117", "0.6230143", "0.6198605", "0.61878157", "0.6102107", "0.6099962", "0.60927933", "0.60619295", "0.6056393", "0.6048941" ]
0.6976282
0
Takes the input ends of all feed pipes and feeds odd numbers from low to high, both inclusive, in a round-robin fashion. The process ends by feeding -1 to every pipe; -1 is the sentinel value.
def distributor(ls_feed_pipe_open,low,high): def getNumber(low,high): i = low if i%2 == 0: #if i is even, then start from i+1 odd. i += 1 while i<=high: yield i i+=2 #no need to check for even numbers, so skip it here at begining yield -1 #when generator yields -1, it reached high, so terminate next_pipe = 0 number = getNumber(low,high) while True: msg = next(number) if msg == -1: #to check when generator reached high. break else: #feed pipes in a round robin fashion, #so that over time each generatePrime process experiences same load. ls_feed_pipe_open[next_pipe].send(msg) next_pipe += 1 if next_pipe == len(ls_feed_pipe_open): next_pipe = 0 for p in ls_feed_pipe_open: p.send(-1) #-1 is sentinel value for all generatePrime processs return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def odd():\n num = 0\n while True:\n yield num * (num & 1)\n num += 1", "def infinite_odd_generator():\n current = 1\n while True:\n yield current\n current = current + 2", "def input_pipe():\n x = ''\n while True:\n x = yield x\n yield # to keep the generator in lock step with input", "def odd_generator(limit):\n current = 1\n while current < limit:\n yield current\n current = current + 2", "def fission_pipes():\n def _pipes(num):\n return [base.BasePipe(i) for i in range(1, num + 1)]\n yield _pipes\n base.reset()", "def stage1(self):\n n = self.min\n while True:\n n, bin_ = self.sort_to_bin(n)\n if n is None:\n n = self.get_new_n(bin_)\n if n is None:\n break\n if self.viz:\n yield", "def testNumberPipeTwoLines(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4')\n self.assertFalse(pl.inPipeline)\n self.assertEqual(4, pl.stdin)\n repl.runCommandLine('')\n self.assertEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def generatePrime(ls_primes, feed_pipe,return_dict):\n local_primes = []\n while True:\n n = feed_pipe.recv()\n if n == -1: # sentinel given by distributor.\n break\n else:\n is_prime = True\n\n ##check for divisibility\n ## no need to check for 2 since all are odd numbers\n for prime in ls_primes[1:]:\n if n%prime == 0:\n is_prime = False\n break\n\n ##if the number is prime, append to global list\n if is_prime:\n local_primes.append(n)\n if len(local_primes) >0:\n return_dict[os.getpid()] = local_primes\n return return_dict\n return 0", "def io_pipe():\n r_fd, w_fd = os.pipe()\n with io.open(r_fd, 'rb', 0) as r, \\\n \t io.open(w_fd, 'wb', 0) as w:\n \tyield r, w", "def fission_pipe():\n yield base.BasePipe(1)\n base.reset()", "def run(self):\n assert len(self.elements) >= 2, \"In order flow, pipe needs 2 or more elements\"\n in_pipe = self.elements[0]\n other_pipes = self.elements[1:-1]\n out_pipe = self.elements[-1]\n\n self.make_assertions(in_pipe, other_pipes, out_pipe)\n\n for data in in_pipe.grasp():\n write = True\n\n for element in other_pipes:\n if isinstance(element, elements.DataPypElement):\n data = element.extend(data)\n elif isinstance(element, elements.FilterPypElement):\n if not element.stay(data):\n write = False\n break\n if write:\n out_pipe.extract(data)", "def pipemeter(cmd1, cmd2):\n\n proc1 = subprocess.Popen(cmd1, bufsize=0, shell=True, stdout=subprocess.PIPE)\n proc2 = subprocess.Popen(cmd2, bufsize=0, shell=True, stdin=subprocess.PIPE)\n bytes_piped = 0\n\n while True:\n data = proc1.stdout.read(CHUNKSIZE)\n length = len(data)\n if length == 0:\n break\n\n written = proc2.stdin.write(data)\n if written != length:\n raise RuntimeError(\"Write failed, wanted to write: {}, written={}\".format(length, written))\n\n bytes_piped += length\n\n proc1.stdout.close()\n proc2.stdin.close()\n\n return proc1.wait(), proc2.wait(), bytes_piped", "def run(self, data, rewrap=False, prefetch=0):\n if rewrap:\n data = [data]\n\n for pipe in self._pipes:\n pipe.feed(data)\n data = pipe\n else:\n iterable = self._prefetch_callable(data, prefetch) if prefetch else data\n for out_data in iterable:\n yield out_data", "def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We 
will put these lines in a separate process in test_1()\n x = Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)", "def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We will put these lines in a separate process in test_1()\n x = Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)", "def get_pipes(self, num = 1):\n if self.api is None:\n self.api = ChessAPI(self)\n self.api.start()\n return [self.api.create_pipe() for _ in range(num)]", "def testNumberPipeOneLine(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4 |')\n self.assertAlmostEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def task_10_generator_of_simple_numbers() -> Generator[int, None, None]:\n def is_num_simple(n):\n \"\"\"\n Return: True if n is a simple number or False if it is not\n \"\"\"\n for i in range(n, 1, -1):\n if n % i == 0 and i < n and n != 1:\n return False\n return True\n\n # generator part\n n = 2\n while n < 200:\n if is_num_simple(n):\n yield n\n n = n + 1", "def next_p2 (num):\n rval = 1\n while rval<num:\n rval <<= 1\n return rval", "def run_alternate(bandit1:SlotMachine, bandit2: SlotMachine, num_pulls_to_do):\n using = bandit1\n other = bandit2\n\n num_pulls_so_far = 0\n winning_count = 0\n while num_pulls_so_far < num_pulls_to_do:\n did_I_win = using.pull()\n num_pulls_so_far += 1\n winning_count += did_I_win # an alternative to if did_I_win: winning_count += 1\n\n temp = using\n using = other\n other = temp\n\n return winning_count", "def part_two(data: List[int]) -> int:\n app = IntCodeApplication(data, name=\"BOOST Part II\", flexible_memory=True)\n app.stdin.put(2)\n app.run()\n return app.stdout.get()", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def test_io_in_out_loop(self):\n self.l.output(conf_io=0x1, state_io=0x0)\n for i in range(10):\n state_d, state_io, count = self.l.output(state_io=0x1)\n self.assertTrue(state_io & 0x2)\n state_d, state_io, count = self.l.output(state_io=0x0)\n self.assertTrue(not state_io & 0x2)", "def incr(n=1):\n for i in xrange(n):\n pulse_hi(INCR)", "def next_p2(num):\n rval = 1\n while rval < num:\n rval <<= 1\n return rval", "def inout(input_, output_):\n while True:\n chunk = input_.read(1024)\n if not chunk:\n break\n output_.write(chunk)", "def split_into_steps(processes, input_limit=None, input_liquid_limit=None, belt_type='blue'):\n\tdef limit(item, input=False):\n\t\tif input and is_liquid(item) and 
input_liquid_limit is not None:\n\t\t\treturn input_liquid_limit\n\t\telif input and not is_liquid(item) and input_limit is not None:\n\t\t\treturn input_limit\n\t\telse:\n\t\t\treturn line_limit(item, belt_type)\n\n\tresults = []\n\tinputs = []\n\tfor process in processes.values():\n\t\tsteps = max(\n\t\t\t[\n\t\t\t\tthroughput / limit(item, process.is_input)\n\t\t\t\tfor item, throughput in process.inputs().items()\n\t\t\t] + [\n\t\t\t\tthroughput / limit(item, process.is_input)\n\t\t\t\tfor item, throughput in process.outputs().items()\n\t\t\t]\n\t\t)\n\n\t\t# note steps is fractional. by dividing original throughput by perfect number of steps,\n\t\t# each such step would be maximal - the problem is there would need to be a fractional\n\t\t# step at the end. So we put down floor(steps) maximal steps, followed by a step\n\t\t# scaled down to represent the fractional step.\n\t\twhole_steps, leftover = divmod(steps, 1)\n\t\tmaximal_step = process.rescale(process.throughput / steps)\n\t\tfractional_step = maximal_step.rescale(maximal_step.throughput * leftover)\n\n\t\tpart = [maximal_step] * whole_steps\n\t\tif leftover:\n\t\t\tpart.append(fractional_step)\n\n\t\tif process.is_input:\n\t\t\tinputs += part\n\t\telse:\n\t\t\tresults += part\n\n\treturn results, inputs", "def infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1", "def _limit_helper(stream: Union[BinaryIO, Generator, List], limit: int) -> Generator:\n for value in stream:\n yield value\n if limit == 1:\n return\n else:\n limit = limit - 1 # FIXME" ]
[ "0.5789352", "0.5761657", "0.5741719", "0.564393", "0.52422297", "0.51917", "0.50984246", "0.50861835", "0.5054945", "0.50331575", "0.50145054", "0.5011917", "0.50026035", "0.49603093", "0.49603093", "0.4935153", "0.49219003", "0.49094537", "0.4875977", "0.4841634", "0.48397136", "0.4836972", "0.4836972", "0.4836903", "0.4833675", "0.4786137", "0.4757106", "0.47484192", "0.47403568", "0.47188297" ]
0.68875015
0
Takes numbers sequentially from feed_pipe and verifies whether each is prime. Any primes found are returned as a dict to the main process; the dict contains only one key-value pair, and its value is always a list.
def generatePrime(ls_primes, feed_pipe,return_dict): local_primes = [] while True: n = feed_pipe.recv() if n == -1: # sentinel given by distributor. break else: is_prime = True ##check for divisibility ## no need to check for 2 since all are odd numbers for prime in ls_primes[1:]: if n%prime == 0: is_prime = False break ##if the number is prime, append to global list if is_prime: local_primes.append(n) if len(local_primes) >0: return_dict[os.getpid()] = local_primes return return_dict return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def worker(nums, outdict):\n for n in nums:\n outdict[n] = primes2(n)", "def primes():\n D = {} # map composite integers to primes witnessing their compositeness\n q = 2 # first integer to test for primality\n while True:\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory\n q += 1", "def distributor(ls_feed_pipe_open,low,high):\n def getNumber(low,high):\n i = low\n if i%2 == 0: #if i is even, then start from i+1 odd.\n i += 1\n while i<=high:\n yield i\n i+=2 #no need to check for even numbers, so skip it here at begining\n yield -1 #when generator yields -1, it reached high, so terminate\n\n next_pipe = 0\n number = getNumber(low,high)\n while True:\n msg = next(number)\n if msg == -1: #to check when generator reached high.\n break\n else:\n #feed pipes in a round robin fashion,\n #so that over time each generatePrime process experiences same load.\n ls_feed_pipe_open[next_pipe].send(msg)\n next_pipe += 1\n if next_pipe == len(ls_feed_pipe_open):\n next_pipe = 0\n for p in ls_feed_pipe_open:\n p.send(-1) #-1 is sentinel value for all generatePrime processs\n return 0", "def getNums():\n key = allprimes() # Empty list for key is created\n\n # Runs code endlessly as no instruction was\n while True: # given to end the code\n num = input(\"Please enter a number:\") # Changed number to integer as it's outputted\n try: # as a string from input\n selected_num = int(num) # Asked for number with try function\n except:\n print(\"\\n Please input only a number!\") # Only accepts a number\n continue\n if selected_num > 100: # Limits number to 100 as that was limit\n print(\"Please only select a number up to 100.\")\n continue\n if selected_num in key:\n print(\"You have picked a prime number please select another number.\")\n continue\n for i, number in enumerate(key): # Iterator function to run through key\n complementary = selected_num - number # Initiated formula\n if complementary in key[i:]: # Obtained complimentary number if available\n print(str(selected_num) + \" = {} + {}\".format(number, complementary))\n break # Printed values as requested for assignment", "def allprimes():\n\n key = [] # The empty list is initiated\n\n for val in range(2, 101): # Set to obtain all prime values from 2 to 100\n if val >= 2: # They are then stored into the list\n for n in range(2, val): # The values have to be greater than 2 as 1 cannot\n if not (val % n): # be included\n break # Pulls all prime numbers by iterating through them\n else: # If a number does not obtain a remainder that means\n key.append(val) # it cannot be divisable by anything but it's own\n # number it is appended as a prime number\n return key", "def primes(count):\n\n prime_list = []\n num = 2\n\n while count > 0:\n\n if prime_checker(num):\n prime_list.append(num)\n count -= 1\n num += 1\n\n return prime_list", "def primes(count):\n\n prime_numbers = [2]\n next_num = 3 \n\n def is_prime(next_num):\n if next_num % 2 == 0:\n return False \n \n for i in range(3, next_num, 2):\n if next_num % i == 0:\n return False \n return True \n\n while count > len(prime_numbers): \n if is_prime(next_num): \n prime_numbers.append(next_num)\n next_num += 1\n\n return prime_numbers", "def test_prime_12(self):\n\t self.assertTrue(prime_generator(12), [2, 3, 5, 7, 11])", "def test_prime_10(self):\n\t self.assertTrue(prime_generator(10), [2, 3, 
5, 7])", "def get_primes(self, startnum=2):\n i = startnum\n while True:\n if self.is_prime(i):\n yield i\n i += 1", "def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)", "def prime_generator():\r\n # map of composites (key) with at least one prime factor in list as value\r\n D = {}\r\n\r\n # first number to test if prime\r\n q = 2\r\n\r\n while 1:\r\n if q not in D:\r\n # next prime found\r\n yield q\r\n # add it's square as a composite to D\r\n D[q**2] = [q]\r\n else:\r\n # update dictionary entries based on composite and its listed primes\r\n for p in D[q]:\r\n D.setdefault(p+q, []).append(p)\r\n del D[q]\r\n q += 1", "def primes():\r\n try:\r\n args = request.args\r\n start_num, end_num = validate_request(args)\r\n # cache key\r\n key = f'primes:{start_num}:{end_num}'\r\n rv = cache.get(key)\r\n if rv is None: # not in cache\r\n job = get_primes_list.queue(start_num, end_num)\r\n print(job.get_id())\r\n cache.set(key, job.get_id(), timeout=3600)\r\n return jsonify(job.get_id()), 200\r\n else:\r\n return jsonify(rv), 200\r\n except Exception as e:\r\n raise InvalidUsage(\"Error Processing request {}\".format(e))", "def gen_primes():\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current\n # number being tested.\n\n D = {}\n\n # The running integer that's checked for primeness\n\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next\n # multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1", "def primes():\n yield 2\n found_primes = [2]\n a = 3\n while True:\n for p in found_primes:\n if p**2 > a:\n found_primes.append(a)\n yield a\n a += 2\n break\n elif a % p == 0:\n a += 2\n break", "def find_prime(num):\n\n if not isinstance(num, int) or isinstance(num, bool):\n raise TypeError(\"number input must be an integer\")\n\n if num <= 1:\n raise ValueError(\"number must be greater than 1\")\n\n pri_num = [2]\n\n # The code below will test if every iteration of 'var' is a prime number\n for var in range(2, num + 1):\n res = 0\n for var2 in pri_num:\n if var == 2:\n break\n elif (var % var2) == 0:\n break\n elif (var2 == pri_num[-1]):\n res = var\n if res:\n pri_num.append(res)\n print(pri_num)\n\n return 0", "def gen_primes():\n\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current number\n # being tested\n\n D = {}\n\n # The runing integer that is checked for primeness\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. 
Since we've reached q, we no longer\n # need it in the map, but we'll mark the next multiples\n # of its witnesses to prepare for larger numbers\n\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1", "def gen_primes():\n\n # Maps composites (=non-primes) to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\" indefinitely,\n # but only as long as required by the current number being tested.\n D = {}\n\n q = 1 # the running integer that is checked for primeness\n while (q := q+1):\n if q not in D:\n # q is a new prime. Yield it and mark its first multiple that is\n # not already marked in previous iterations\n yield q\n D[q*q] = [q]\n else:\n # q is composite. D[q] is the list of primes that divide it. Since\n # we have reached q, we no longer need it in the map, but we will\n # mark the next multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p+q, []).append(p)\n del D[q]", "def test_prime_2(self):\n\t self.assertTrue(prime_generator(2), [2])", "def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)", "def check_prime_worker(job_queue):\n while True:\n # your code here\n # 1. get next available number from queue\n try:\n number = job_queue.get(block=False)\n print(f\"Process {current_process()} checks number {number}\")\n except Empty:\n break\n\n # 2. print the number and whether it\n # is prime or not, use is_prime()\n if is_prime(number):\n print(f\"{number} is prime\")\n else:\n print(f\"{number} is not prime\")\n\n # 3. use try/except to catch Empty exception\n # and quit the loop if no number remains in queue\n # done in step 1", "def primeIterator(no = 0,lessThan = None ):\r\n \r\n prmd = {2:1,3:2}\r\n sqrtn = 2\r\n l = 1\r\n count = 0\r\n #or (no==-1 and not lessThan) l < no or:\r\n print(\"no\", no)\r\n while ((no!=0 and count < no) or ( (no==0) and (lessThan and l<lessThan ) or (not lessThan ) ))and (l<4) :\r\n if l in prmd:\r\n count += 1\r\n yield l\r\n l+=1\r\n l=5\r\n add = 2\r\n \r\n while (no!=0 and count < no) or ( (no==0) and ( (lessThan and l<lessThan ) or (not lessThan )) ) : #check only 6n-1 and 6n+1\r\n if l > sqrtn**2:\r\n sqrtn = l**0.5\r\n for i in prmd:\r\n if i > sqrtn:\r\n prmd[l] = len(prmd)\r\n add = 2 if add==4 else 2\r\n count +=1\r\n yield l\r\n break\r\n if l%i ==0 : \r\n break\r\n l+=add", "def test_15(self):\n\t self.assertTrue(prime_generator(15), [2, 3, 5, 7, 11, 13])", "def getPrime(bits):\n\twhile(True) :\n\t\t# on continue a tirer des nombres tant que l'on n'a pas trouve de nombre premier\n\t\tp = getrandbits(bits)\n\t\tif(miller_rabin(p,100)) :\n\t\t\treturn p", "def prime_generator() -> int:\n \n #Start with the first prime.\n counter = count(2)\n candidate = next(counter)\n cache: list = [candidate]\n yield candidate\n \n # Set a flag.\n divisible = False\n while True:\n candidate = next(counter)\n # Check if the candidate is prime.\n for number in cache:\n # If number is greater than the squareroot of candidate, we are done.\n if number * number > candidate:\n break\n # If number divides candidate, candidate is not prime.\n if candidate % number == 0:\n divisible = True\n break\n # If is is prime, add it to the list.\n if not divisible:\n cache.append(candidate)\n yield candidate\n # Reset the flag.\n divisible = False", "def getPrime(self, group=17):\n default_group = 17\n\n primes = {\n 5: 
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF,\n 14: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF,\n 15: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF,\n 16: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF,\n 17:\n 
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF,\n 18:\n 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F5
6E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF\n }\n\n if group in primes.keys():\n return primes[group]\n else:\n print(\"Error: No prime with group %i. Using default.\" % group)\n return primes[default_group]", "def primes(count):\n\n # store lst of prime numbers to be returned @ end\n primes = []\n\n # set up prime # generator from other fn starting at 2\n num = 2\n\n while count > 0:\n\n # check if prime\n if is_prime(num):\n # if so, append to primes lst\n primes.append(num)\n # decrement by 1; used to keep track of how many primes enter lst\n count -= 1\n\n # check next number if prime, etc.\n num += 1\n\n # return final lst\n return primes", "def primes(count):\n\n prime_nums = [2]\n prime = 3\n\n for i in range(1, count):\n\n while prime not in [3, 5, 7] and (\n prime % 3 == 0 or prime % 5 == 0 or prime % 7 == 0\n ):\n prime += 2\n\n prime_nums.append(prime)\n prime += 2\n\n return prime_nums", "def prime_numbers(x: int):\n A = [True] * x\n A[0] = A[1] = False\n for i in range(2, x, 1):\n if is_simple_number(i):\n for m in range(2 * i, x, i):\n A[m] = False\n n = 0\n for k in range(x):\n print(k, \"is prime\" if A[k] else \"is not prime\")\n if A[k]:\n n += 1\n\n B = [0] * n\n n = 0\n for k in range(x):\n if A[k]:\n B[n] = k\n n += 1\n return B", "def primes(count):\n\n primes = []\n number_to_check = 2\n\n while len(primes) < count:\n # check if number is prime\n # if prime, add to list\n # if not prime, move on\n # increment number to check\n \n is_prime = True\n\n for num in range(2,number_to_check):\n if number_to_check % num == 0 and num != number_to_check:\n is_prime = False\n break\n \n if is_prime == True:\n primes.append(number_to_check)\n\n number_to_check += 1\n\n return primes" ]
[ "0.62043977", "0.60420763", "0.6040586", "0.59742284", "0.5961007", "0.59362507", "0.5920342", "0.5811969", "0.5799837", "0.57528454", "0.5740674", "0.5735719", "0.5734782", "0.5734576", "0.5731913", "0.5719384", "0.5716878", "0.5673636", "0.5661564", "0.56440914", "0.5617547", "0.5578173", "0.5564181", "0.5558922", "0.55565923", "0.5552255", "0.555117", "0.5508194", "0.5504439", "0.55032545" ]
0.78329885
0
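The query/document pair above describes a prime-checking worker that drains a multiprocessing pipe until it receives a -1 sentinel and then reports its findings as a single pid-keyed entry in a shared dict. The sketch below shows one way such a worker can be wired to a round-robin distributor; the driver, the worker's trial-division test, and names such as prime_worker and run are illustrative assumptions, not code taken from this record.

import os
from multiprocessing import Manager, Pipe, Process

def prime_worker(recv_end, return_dict):
    # Drain the pipe until the -1 sentinel; store any primes under this pid.
    local_primes = []
    while True:
        n = recv_end.recv()
        if n == -1:
            break
        if n >= 2 and all(n % d for d in range(2, int(n ** 0.5) + 1)):
            local_primes.append(n)
    if local_primes:
        return_dict[os.getpid()] = local_primes   # one key/value pair per worker

def run(low, high, n_workers=2):
    manager = Manager()
    return_dict = manager.dict()
    pipes = [Pipe(duplex=False) for _ in range(n_workers)]
    workers = [Process(target=prime_worker, args=(recv, return_dict))
               for recv, _send in pipes]
    for w in workers:
        w.start()
    for i, n in enumerate(range(low, high)):
        pipes[i % n_workers][1].send(n)           # round-robin distribution
    for _recv, send in pipes:
        send.send(-1)                             # tell every worker to stop
    for w in workers:
        w.join()
    return dict(return_dict)

if __name__ == "__main__":
    print(run(2, 50))   # e.g. {pid_a: [2], pid_b: [3, 5, 7, 11, ...]}

The explicit sentinel keeps shutdown simple: each worker leaves its loop exactly once, and the parent only joins after every pipe has been fed a -1.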
Reject unsupported chain parts
def _select_simple_chainparts(chain_parts): for cp in chain_parts: if reject_substr_res.search(cp['chainPartName']): return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_chain():", "def reject(self):\n pass", "def test_blind_sig_chain_wrong_intermediary(self): # pylint: disable=too-many-locals\n\n test_levels = 4\n msg = os.urandom(1024)\n wrong_level = 2\n\n ca = ECCBlind()\n signer_obj = ca\n fake_intermediary = ECCBlind()\n\n output = bytearray()\n\n for level in range(test_levels):\n if not level:\n output.extend(ca.pubkey())\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level == wrong_level:\n output.extend(fake_intermediary.pubkey())\n elif level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(msg, 1))", "def _filter_committees_failing_weak_representation(self, profile: list[set[int]], committees: list[list[int]]) -> list[list[int]]:\n unique_approval_scores = self._compute_unique_approval_scores(profile)\n parties_deserving_representation = {party for party in self.parties if unique_approval_scores[party] >= self.n / self.k}\n possible_committees = [committee for committee in committees if parties_deserving_representation.issubset(set(committee))]\n return possible_committees", "def test_exact_nonsupercontrolled_decompose(self):\n with self.assertWarns(UserWarning, msg=\"Supposed to warn when basis non-supercontrolled\"):\n TwoQubitBasisDecomposer(UnitaryGate(Ud(np.pi / 4, 0.2, 0.1)))", "def test_blind_sig_chain_wrong_msg(self): # pylint: disable=too-many-locals\n\n test_levels = 4\n msg = os.urandom(1024)\n fake_msg = os.urandom(1024)\n\n ca = ECCBlind()\n signer_obj = ca\n\n output = bytearray()\n\n for level in range(test_levels):\n if not level:\n output.extend(ca.pubkey())\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(fake_msg, 1))", "def valid_chain(chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n #print(last_block)\n #print(block)\n #print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n if block['previous_hash'] != hash(last_block):\n return False\n\n # Check that the Proof of Work is correct\n #Delete the reward transaction\n transactions = block['transactions'][:-1]\n # Need to make sure that the dictionary is ordered. 
Otherwise we'll get a different hash\n transaction_elements = ['sender_address', 'recipient_address', 'value']\n transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in transactions]\n\n if not valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def squeeze_accept(partition):\n Write a function that\n - Sort districts by most Democratic heavy and most Republican heavy\n\n - Assign a base value of competitiveness for each district\n - Run chain, accept only if districts satisfy values under or order\n \"\"\"\n\n#--- CONSTRAINTS\n\n\"\"\"", "def remove_incompatible_operations(pipelines):\n\n def find_duplicates(pipelines):\n for idx in range(len(pipelines)):\n for idx_ in range(idx + 1, len(pipelines)):\n if pipelines[idx] == pipelines[idx_]:\n return idx\n return -1\n\n\n def _remove_illegal_combination(pipelines, combination):\n illegal_pipes = []\n pipelines_ = []\n for idx, pipeline in enumerate(pipelines):\n combination_ = list(set.intersection(set(pipeline.keys()), set(combination)))\n actives = [pipeline[key] != None for key in pipeline if key in combination_]\n\n if sum(actives) > 1:\n illegal_pipes.append(idx) # Store the index of bad combination\n for param in combination_: # Generate substituting legal combinations\n if pipeline[param] != None: # we need to make new pipe\n pipeline_ = pipeline.copy()\n for param_ in combination_: # Set ALL conflicting parameters to None\n pipeline_[param_] = None\n pipeline_[param] = pipeline[param] # Set current parameter back to original value\n pipelines_.append(pipeline_)\n\n new_pipelines = [i for j, i in enumerate(pipelines) if j not in illegal_pipes]\n # new_pipelines.extend(pipelines_)\n return new_pipelines, pipelines_\n\n illegal_combinations = [['BASELINE', 'MSC', 'EMSC', 'RNV', 'SNV', 'LSNV'],\n ['SMOOTH', 'SAVGOL']]\n\n for combination in illegal_combinations:\n pipelines, new_pipes = _remove_illegal_combination(pipelines, combination)\n\n pipelines.extend(new_pipes)\n pipelines_set = {json.dumps(pipeline, sort_keys=True) for pipeline in pipelines}\n pipelines = [json.loads(item) for item in pipelines_set]\n\n\n return pipelines", "def extract_mixed_chains(raw_chains):\n chain_isolation_regex = re.compile(r'^\\w+\\s+\\d+\\s+(.*)')\n\n mixed_chains = [\n re.search(chain_isolation_regex,\n raw_chain).group(1).strip() # remove whitespace\n for raw_chain in raw_chains\n ]\n return mixed_chains", "def test_unrequired_chain_delete(self):\n self.txn.store_delete(\"felix-c\")\n self.assertEqual(self.txn.affected_chains, set([\"felix-c\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([\"felix-c\"]))\n self.assertEqual(self.txn.referenced_chains,\n set([\"felix-b\", \"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [],\n \"felix-b\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\", \"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"]),\n \"felix-stub\": set([\"felix-a\"])})", "def valid_chain(chain):\n\n for i in range(len(chain) - 1):\n parent_edge = chain[i]\n child_edge = chain[i + 1]\n # verify that the child of the parent edge (second node) matches the parent of the child edge (first node)\n if not parent_edge[1] == child_edge[0]:\n # if this isn't\n return False\n return True", "def resolve_conflicts(self):\n 
neighbours = self.nodes\n new_chain = None\n # Look only for chains longer than this\n max_length = len(self.chain)\n # Get and verify the chains from all the nodes in the network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n # Check if chain is longer and valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n # Replace this chain if a longer valid chain is discovered\n if new_chain:\n self.chain = new_chain\n return True\n return False", "def validate_blockchain(chain):\n assert isinstance(chain, list)\n\n for hook in chain[::-1]:\n pass", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(f'{last_block}')\n print(f'{block}')\n print(\"\\n----------------\\n\")\n # verify hash integrity\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # verify proof integrity\n if not self.valid_proof(last_block['proof'], block['proof']):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def route_rejected(self, prefix, next_hop, as_path):", "def test_fail_missing_signature_fragment_underflow(self):\n # Adjust bundle balance, since we will also remove the change\n # transaction.\n self.bundle[0].value += self.bundle[-1].value\n\n # Remove the last input's second signature fragment, and the change\n # transaction.\n del self.bundle.transactions[-2:]\n for txn in self.bundle:\n txn.last_index -= 2\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Transaction 4 has invalid signature (using 2 fragments).',\n ],\n )", "def resolve_conflicts(self):\n neighbours = self.nodes\n new_chain = None\n\n # We are only looking for chains longer that ours\n max_length = len(self.chain)\n\n # Checking for the length of each chain in our network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n if length > max_length and self.validate_chain(chain):\n new_chain = chain\n max_length = length\n\n # Replace our chain with a new, longer, valid chain in our network (if present)\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def chain(self):\n return ValueError(\"chain function not set.\")", "def chain_rangeValid(start, stop):\r\n for i in range(start, stop):\r\n chain = chain_153(i)\r\n if len(chain) > 1 or chain[0] == 153:\r\n for j in chain_153(i):\r\n print(j)", "def resolve_conflicts(self):\n\n neighbours = self.nodes\n new_chain = None\n\n # We're only looking for chains longer than ours\n max_length = len(self.chain)\n\n # Grab and verify the chains from all the nodes in our network\n for node in neighbours:\n response = requests.get(f'http://{node}:5000/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if the length is longer and the chain is valid\n if length > max_length:\n max_length = length\n new_chain = chain\n\n # Replace our chain if we discovered a new, valid chain longer than ours\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def test_blind_sig_chain_wrong_ca(self): # pylint: disable=too-many-locals\n\n test_levels = 
4\n msg = os.urandom(1024)\n\n ca = ECCBlind()\n fake_ca = ECCBlind()\n signer_obj = fake_ca\n\n output = bytearray()\n\n for level in range(test_levels):\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n if not level:\n # unlisted CA, but a syntactically valid pubkey\n output.extend(fake_ca.pubkey())\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(msg, 1))", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n # print(f'{last_block}')\n # print(f'{block}')\n # print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n last_block_hash = self.hash(last_block)\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # Check that the Proof of Work is correct\n if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def valid_chain(self, chain):\n\n last_block = chain[0]\n current_index = 1\n \n while current_index < len(chain):\n block = chain[current_index]\n # Check correctness of last block's hash\n if block['previous_hash'] != self.hash(last_block): \n return False\n # Check correctness of proof-of-work\n if not self.valid_proof(last_block['proof'], block['proof'], block['previous_hash']):\n return False\n last_block = block \n current_index += 1\n\n return True", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(last_block)\n print(block)\n print(\"\\n--------\\n\")\n \n #check that the hash of the previous block is correct\n\n if block[\"previous_hash\"] != self.hash(last_block):\n print(\"Previous hash does not match\")\n return False\n\n if not self.valid_proof(block):\n print(\"Block proof of work is invalid\")\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def resolve_conflicts(self):\n neighbors = self.nodes\n print(neighbors)\n new_chain = None\n\n # We only care about chains longer than our own\n max_length = len(self.chain)\n\n # Get and verify all neighbors chains\n for node in neighbors:\n try:\n url = 'http://{}/chain'.format(node)\n response = requests.get(url)\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if longer and chain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n except:\n return False\n # Replace our chain if necessary\n if new_chain:\n self.chain = new_chain\n self._write_chain()\n return True\n\n return False", "def resolve_conflicts(self):\n\n neighbours = self.nodes\n new_chain = None\n\n # We're only looking for chains longer than ours\n max_length = len(self.chain)\n\n # Grab and verify the chains from all the nodes in our network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n\n if 
response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if the length is longer and the chain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n # Replace our chain if we discovered a new, valid chain longer than ours\n if new_chain:\n self.rewrite_chain(new_chain)\n return True\n\n return False", "def test_sort_chain_two_content_3():\n chain = N.Node(2, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result.data <= result.next.data, \"sort_chain returned chain out of order given input chain size 2 with dupicates\"", "def skip_sub_components(self, reason):\n pass", "def test_sort_chain_multiple_content_decreasing():\n n = 17\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n prev = None\n seen = [False]*n\n for i in range(n):\n assert walker.data in data, \"sort_chain created extraneous data {} given chain with values decreasing\".format(walker.data)\n seen[walker.data] = True\n if prev is not None:\n assert prev.data <= walker.data, \"sort_chain placed {} before {} given chain with values decreasing\".format(prev.data, walker.data)\n prev = walker\n walker = walker.next\n\n for i,b in enumerate(seen):\n assert b, \"sort_chain omitted data value {} from returned chain given chain with values decreasing\".format(i)" ]
[ "0.67994744", "0.5958347", "0.5583729", "0.5497606", "0.54252285", "0.54134977", "0.53917265", "0.53915894", "0.53751105", "0.53494143", "0.53485787", "0.5343248", "0.5343216", "0.53384286", "0.5323294", "0.5297185", "0.5291411", "0.52887464", "0.5273759", "0.5243703", "0.52368015", "0.5216658", "0.5214974", "0.52129567", "0.5207504", "0.52067095", "0.52059996", "0.5198885", "0.51921374", "0.51851195" ]
0.6198813
1
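The record above filters chain parts through a module-level reject_substr_res regex that is not shown in the row. Below is a minimal sketch, assuming reject_substr_res is a compiled alternation of disqualifying substrings; the particular substrings and the sample chainPartName values are invented for illustration.

import re

# Assumed list of disqualifying substrings; the real module defines its own.
reject_substr_res = re.compile('|'.join(('ion', 'dijet', 'agg', 'gsc', 'ht')))

def select_simple_chainparts(chain_parts):
    # Reject the whole selection as soon as any chainPartName matches.
    for cp in chain_parts:
        if reject_substr_res.search(cp['chainPartName']):
            return False
    return True

print(select_simple_chainparts([{'chainPartName': 'j80_0eta240'}]))      # True
print(select_simple_chainparts([{'chainPartName': 'j0_dijet80j12et'}]))  # False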
Marshal information from the selected chainParts to create a 'simple_partition' label.
def _make_simple_partition_label(chain_dict): cps = chain_dict['chainParts'] if not (_select_simple_chainparts(cps)): raise NotImplementedError( 'chain fails substring selection: not "simple": %s' % ( chain_dict['chainName'])) label = 'simplepartition([' for cp in cps: smcstr = str(cp['smc']) if smcstr == 'nosmc': smcstr = '' for i in range(int(cp['multiplicity'])): # condition_str = '(%set,%s,%s)' % (str(cp['threshold']), # str(cp['etaRange']), # smcstr,) condition_str = '(%set,%s' % (str(cp['threshold']), str(cp['etaRange']),) if smcstr: condition_str += ',%s)' else: condition_str += ')' label += condition_str label += '])' return label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def provide_partition_info(self):\n self.partition_info = True", "def __str__(self) -> str:\n return str(self.my_partition)", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def FormatPartition(self, partition):\n\n fstab = self.fstab\n if fstab:\n p = fstab[partition]\n self.script.append('format(\"%s\", \"%s\", %s, \"%s\", \"%s\");' %\n (p.fs_type, common.PARTITION_TYPES[p.fs_type],\n self._GetSlotSuffixDeviceForEntry(p),\n p.length, p.mount_point))", "def __str__(self):\n \n return \"Part ID: %s, %s\" % (self.part_id, self.name)", "def create(self, disk):\n logging.info('Adding type %d partition to disk image: %s' % (self.type, disk.filename))\n run_cmd('parted', '--script', '--', disk.filename, 'mkpart', 'primary', self.parted_fstype(), self.begin, self.end)", "def print_partition(t, par=[]):\n\n if is_leaf(t):\n if label(t) == True:\n print(' + '.join(par))\n else:\n left, right = branches(t)[0], branches(t)[1]\n print_partition(left, [str(label(t))] + par)\n print_partition(right, par)\n #print(\"total partitions: \", str(count_leaves(t)))", "def partid2nids(self, partid, ntype=...):\n ...", "def partid2nids(self, partid, ntype=...):\n ...", "def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()", "def dump_parts(self, io):\n\n # XXX refactor with Tempita\n title = \"Parts created by the docutils writer '%s'\" % self.strategy.name\n io.say(title + os.linesep)\n io.say(len(title) * '-')\n io.say(2 * os.linesep)\n io.say('Part keys: ' + 2 * os.linesep)\n\n parts = self.publish_parts(io)\n io.say(os.linesep.join(sorted(parts.keys())))\n io.say(2 * os.linesep)\n for part in parts:\n io.say(\"Value of part '%s':%s\" % (part, os.linesep))\n io.say(parts[part].encode('utf-8') + os.linesep)\n io.say(80*'-'+os.linesep)\n io.say(os.linesep)", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % 
len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def partid(self): # -> Unknown:\n ...", "def get_partition():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not selection.obj_type in ['partition']:\n warning(\"You need to partition the selection first.\")\n return\n res = askItems([['property',[1]]],\n caption='Partition property')\n if res:\n prop = res['property']\n getPartition(selection,prop)\n highlightPartitions(selection)", "def _wrap_partitions(self, partitions):\n return [\n self.partition_type(object_id, length, width, ip)\n for (object_id, length, width, ip) in zip(*[iter(partitions)] * 4)\n ]", "def bootpart(disks):\n return path_to_partition(disks, '/boot/foo')", "def choose_partition():\n # Ask the user wether the partitions should be taken from the original partitions, or from the home-made partitions\n file_name = selector([\"The original partition given by the instructor\", \"The homemade partition file\"], [\"ORIGINAL\", \"HOMEMADE\"])\n\n # Open the corresponding file\n if file_name == \"1\" or file_name == \"ORIGINAL\":\n file = open(\"./assets/partitions.txt\", \"r\")\n elif file_name == \"2\" or file_name == \"HOMEMADE\":\n file = open(\"./assets/homemade_partitions.txt\", \"r\")\n\n skip_lines(-1)\n\n # Print all song's names in the partitions\n lines = file.readlines()\n file.close()\n for i in range(0, len(lines), 2):\n print(lines[i][:-1])\n\n # Ask the user to choose for a song\n song_index = choose_number(len(lines) / 2)\n\n # Get the corresponding song's partition and convert notes to Note instances\n partition = lines[song_index * 2 - 1][:-1].replace(' ', '')\n raw_notes = get_notes_from_line(partition)\n parsed_notes = [Note(note) for note in raw_notes]\n return parsed_notes", "def partid2nids(self, partid, ntype): # -> None:\n ...", "def create(data):\n \n return Part(\n part_id = data['part_num'],\n category_id = data['part_cat_id'],\n external_ids = data.get('external_ids', {}),\n name = data['name'],\n year_from = data.get('year_from', None),\n year_to = data.get('year_to', None),\n url = data.get('part_url', None),\n img_url = data.get('part_img_url', None),\n print_of = data.get('print_of', None),\n prints = data.get('prints', []),\n molds = data.get('molds', []),\n alternates = data.get('alternates', []))", "def _wrap_partitions(self, partitions):\n return [\n self.partition_type(future, length, width, ip)\n for (future, length, width, ip) in zip(*[iter(partitions)] * 4)\n ]", "def parse_part(self):\n parts = []\n for part in re.split(r'\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i]\n if i % 2 != 0: # number is odd\n self.part_dict[parts[i]] = parts[i + 1]", "def preCommitFixup(self):\n log_method_call(self, self.name)\n if not self.exists or not self.disklabelSupported:\n return\n\n # find the correct partition on the original parted.Disk since the\n # name/number we're now using may no longer match\n _disklabel = self.disk.originalFormat\n\n if self.isExtended:\n # getPartitionBySector doesn't work on extended partitions\n _partition = _disklabel.extendedPartition\n log.debug(\"extended lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n else:\n # lookup the partition by sector to avoid the renumbering\n # nonsense entirely\n _sector = 
self.partedPartition.geometry.start\n _partition = _disklabel.partedDisk.getPartitionBySector(_sector)\n log.debug(\"sector-based lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n\n self.partedPartition = _partition", "def prep_disk_for_formatting(disk=None):\n disk['Format Warnings'] = '\\n'\n width = len(str(len(disk['Partitions'])))\n\n # Bail early\n if disk is None:\n raise Exception('Disk not provided.')\n\n # Set boot method and partition table type\n disk['Use GPT'] = True\n if (get_boot_mode() == 'UEFI'):\n if (not ask(\"Setup Windows to use UEFI booting?\")):\n disk['Use GPT'] = False\n else:\n if (ask(\"Setup Windows to use BIOS/Legacy booting?\")):\n disk['Use GPT'] = False\n\n # Set Display and Warning Strings\n if len(disk['Partitions']) == 0:\n disk['Format Warnings'] += 'No partitions found\\n'\n for partition in disk['Partitions']:\n display = '{size} {fs}'.format(\n num = partition['Number'],\n width = width,\n size = partition['Size'],\n fs = partition['FileSystem'])\n\n if is_bad_partition(partition):\n # Set display string using partition description & OS type\n display += '\\t\\t{q}{name}{q}\\t{desc} ({os})'.format(\n display = display,\n q = '\"' if partition['Name'] != '' else '',\n name = partition['Name'],\n desc = partition['Description'],\n os = partition['OS'])\n else:\n # List space used instead of partition description & OS type\n display += ' (Used: {used})\\t{q}{name}{q}'.format(\n used = partition['Used Space'],\n q = '\"' if partition['Name'] != '' else '',\n name = partition['Name'])\n # For all partitions\n partition['Display String'] = display", "def parse_partition(partition):\n partition_data = partition.split(\":\")\n if len(partition_data) != 2:\n raise ValueError(\"Partitions line parts format is 'size:mount'\")\n return partition_data", "def nid2partid(self, nids, ntype): # -> None:\n ...", "def nid2partid(self, nids, ntype=...):\n ...", "def nid2partid(self, nids, ntype=...):\n ...", "def usableparts(self):\n # First get the partition type-id for all hard disk partitions\n partid = {}\n for pline in self.fdiskl():\n partid[pline[0]] = pline[4]\n ups = {}\n for s in self.xlist(\"get-blkinfo\")[1]:\n mo = re.match(r'(/dev/[^:]*):(?: LABEL=\"([^\"]*)\")?(?:'\n ' UUID=\"([^\"]*)\")?(?: TYPE=\"([^\"]*)\")?', s)\n if mo:\n dev, label, uuid, fstype = mo.groups()\n if fstype in (None, \"linux_raid_member\", \"LVM2_member\"):\n continue\n if dev.startswith(\"/dev/loop\"):\n continue\n rem = None\n if dev.startswith(\"/dev/sd\"):\n if partid.get(dev) == \"fd\":\n # This test seems to be necessary because blkid\n # sometimes returns an fs-type, rather than\n # linux_raid_member\", for the the first device\n # in a formatted raid array\n continue\n rem = self.xlist(\"removable\", dev)[1][0].strip() == \"1\"\n ups[dev] = (fstype, label, uuid, rem)\n return ups", "def partition_pair_to_spart(part_pair):\n part_star = list(part_pair[0])\n part_circ_star = list(part_pair[1])\n add_zeros = len(part_circ_star) - len(part_star)\n if add_zeros != 0:\n new_star = part_star + [0]\n else:\n new_star = part_star\n diff_list = [a - b for a, b in zip(part_circ_star, new_star)]\n fermionic_parts = []\n bosonic_parts = []\n for k in range(len(diff_list)):\n if diff_list[k] == 0:\n bosonic_parts += [part_circ_star[k]]\n elif diff_list[k] == 1:\n fermionic_parts += [new_star[k]]\n else:\n raise Exception(\"This should not happen.\")\n # sparts = Superpartitions()\n return _Superpartitions([fermionic_parts, bosonic_parts])" ]
[ "0.65343463", "0.53522164", "0.52719545", "0.52471644", "0.51894504", "0.5177348", "0.5096431", "0.5087608", "0.4973218", "0.4973218", "0.4970022", "0.4951215", "0.49493623", "0.49468526", "0.49310818", "0.492718", "0.49157664", "0.49112102", "0.49103266", "0.48851392", "0.4877146", "0.48584715", "0.48566926", "0.48450437", "0.48346877", "0.482512", "0.4820094", "0.4820094", "0.47875524", "0.47756955" ]
0.6581646
0
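To make the string layout produced by the label builder above concrete, here is a compact, self-contained re-implementation of its inner loop. The chainParts dicts are invented minimal examples that populate only the keys the function reads (threshold, etaRange, smc, multiplicity), so this shows the shape of the output rather than a real trigger-menu chain dictionary.

def simple_partition_label(chain_parts):
    label = 'simplepartition(['
    for cp in chain_parts:
        smc = '' if str(cp['smc']) == 'nosmc' else str(cp['smc'])
        for _ in range(int(cp['multiplicity'])):
            cond = '(%set,%s' % (cp['threshold'], cp['etaRange'])
            if smc:
                cond += ',%s)' % smc
            else:
                cond += ')'
            label += cond
    return label + '])'

parts = [
    {'threshold': '35', 'etaRange': '0eta240', 'smc': 'nosmc', 'multiplicity': '2'},
    {'threshold': '55', 'etaRange': '0eta240', 'smc': 'nosmc', 'multiplicity': '1'},
]
print(simple_partition_label(parts))
# -> simplepartition([(35et,0eta240)(35et,0eta240)(55et,0eta240)])

The sketch substitutes the smc value into the condition string, which appears to be the intent of the original ',%s)' branch.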
Marshal information from the selected chainParts to create a vbenf label. Use a Reducer for elimination of unusable jets
def _make_vbenf_label(chain_parts): # toy label for development: run simple and dijet independently. # simple makes Et cuts on two jets. Independently (sharing possible) # of jets choosean by simple, the dijet # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6 assert len(chain_parts) == 1 scenario = chain_parts[0]['hypoScenario'] assert scenario.startswith('vbenf') args = _args_from_scenario(scenario) if not args: return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' arg_res = [ re.compile(r'(?P<lo>\d*)(?P<key>fbet)(?P<hi>\d*)'), re.compile(r'(?P<lo>\d*)(?P<key>mass)(?P<hi>\d*)'), re.compile(r'(?P<lo>\d*)(?P<key>et)(?P<hi>\d*)'), ] defaults = { 'et': ('101', 'inf'), 'mass': ('800', 'inf'), 'fbet': ('501', 'inf'), } argvals = {} while args: assert len(args) == len(arg_res) arg = args.pop() for r in arg_res: m = r.match(arg) if m is not None: arg_res.remove(r) gd = m.groupdict() key = gd['key'] try: lo = float(gd['lo']) except ValueError: lo = defaults[key][0] argvals[key+'lo'] = lo try: hi = float(gd['hi']) except ValueError: hi = defaults[key][1] argvals[key+'hi'] = hi assert len(args) == len(arg_res) assert len(args) == 0 return """ and ( [] simple ( [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)] ) combgen ( [(10et, 0eta320)] dijet ( [(%(masslo).0fdjmass, 26djdphi)] ) simple ( [(10et, 0eta320)(20et, 0eta320)] ) ) )""" % argvals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chainDict2jetLabel(chain_dict):\n\n # suported scenarios \n router = {\n 'simple': _make_simple_label,\n 'HT': _make_ht_label,\n 'vbenf': _make_vbenf_label,\n 'dijet': _make_dijet_label,\n 'combinationsTest': _make_combinationsTest_label,\n 'partitionsTest': _make_partitionsTest_label,\n }\n\n # chain_part - scenario association\n cp_sorter = {}\n for k in router: cp_sorter[k] = []\n\n for cp in chain_dict['chainParts']:\n if cp['signature'] != 'Jet' and cp['signature'] != 'Bjet': \n continue\n for k in cp_sorter:\n if cp['hypoScenario'].startswith(k):\n cp_sorter[k].append(cp)\n break\n\n # obtain labels by scenario.\n labels = []\n for k, chain_parts in cp_sorter.items():\n if chain_parts: labels.append(router[k](chain_parts))\n\n assert labels\n nlabels = len(labels)\n if nlabels == 1: return labels[0]\n if nlabels == 2:\n alabel = \"\"\"\\\nand([]\n %s\n %s)\"\"\" % (tuple(labels))\n return alabel\n\n # more than 2 labels is not expected\n assert False", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def _make_simple_partition_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n label = 'simplepartition(['\n for cp in cps:\n smcstr = str(cp['smc'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr:\n condition_str += ',%s)'\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def _text_write_preprocess(self):\n self.check()\n\n max_name_len = np.max([len(name) for name in self.name])\n fieldtypes = [\"U\" + str(max_name_len), \"f8\", \"f8\"]\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n\n component_fieldnames = []\n for comp_name in comp_names:\n # This will add e.g. 
ra_J2000 and dec_J2000 for FK5\n component_fieldnames.append(comp_name + \"_\" + frame_desc_str)\n fieldnames = [\"source_id\"] + component_fieldnames\n stokes_names = [\"I\", \"Q\", \"U\", \"V\"]\n fieldshapes = [()] * 3\n\n if self.stokes_error is not None:\n stokes_error_names = [(f\"{k}_error\") for k in [\"I\", \"Q\", \"U\", \"V\"]]\n\n n_stokes = 0\n stokes_keep = []\n for si, total in enumerate(np.nansum(self.stokes.to(\"Jy\"), axis=(1, 2))):\n if total > 0:\n fieldnames.append(stokes_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n if self.stokes_error is not None:\n fieldnames.append(stokes_error_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n n_stokes += 1\n stokes_keep.append(total > 0)\n\n assert n_stokes >= 1, \"No components with nonzero flux.\"\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n fieldnames.append(\"subband_frequency\")\n else:\n fieldnames.append(\"frequency\")\n fieldtypes.append(\"f8\")\n fieldshapes.extend([(self.Nfreqs,)])\n elif self.reference_frequency is not None:\n fieldnames.extend([(\"reference_frequency\")])\n fieldtypes.extend([\"f8\"])\n fieldshapes.extend([()] * n_stokes + [()])\n if self.spectral_index is not None:\n fieldnames.append(\"spectral_index\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_rise_lst\"):\n fieldnames.append(\"rise_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_set_lst\"):\n fieldnames.append(\"set_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n dt = np.dtype(list(zip(fieldnames, fieldtypes, fieldshapes)))\n\n arr = np.empty(self.Ncomponents, dtype=dt)\n arr[\"source_id\"] = self.name\n\n for comp_ind, comp in enumerate(comp_names):\n arr[component_fieldnames[comp_ind]] = getattr(self.skycoord, comp).deg\n\n for ii in range(4):\n if stokes_keep[ii]:\n arr[stokes_names[ii]] = self.stokes[ii].T.to(\"Jy\").value\n if self.stokes_error is not None:\n arr[stokes_error_names[ii]] = self.stokes_error[ii].T.to(\"Jy\").value\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n arr[\"subband_frequency\"] = self.freq_array.to(\"Hz\").value\n else:\n arr[\"frequency\"] = self.freq_array.to(\"Hz\").value\n elif self.reference_frequency is not None:\n arr[\"reference_frequency\"] = self.reference_frequency.to(\"Hz\").value\n if self.spectral_index is not None:\n arr[\"spectral_index\"] = self.spectral_index\n\n if hasattr(self, \"_rise_lst\"):\n arr[\"rise_lst\"] = self._rise_lst\n if hasattr(self, \"_set_lst\"):\n arr[\"set_lst\"] = self._set_lst\n\n return arr", "def _body(self, x, ensembled_batch, non_ensembled_batch, idx):\n i, current_representations = x\n del x\n feats = self._slice_batch(i, ensembled_batch, non_ensembled_batch)\n representations_update = self.evoformer(*self.batch_expand(feats))\n new_representations = {}\n for k in current_representations:\n new_representations[k] = (\n current_representations[k] + representations_update[k])\n del representations_update\n return i+1, new_representations", "def get_mapped_feature_name(self):\n\n # open a h5 file in case we need it\n f5 = h5py.File(self.train_database[0], 'r')\n mol_name = list(f5.keys())[0]\n mapped_data = f5.get(mol_name + '/mapped_features/')\n chain_tags = ['_chain1', '_chain2']\n\n # if we select all the features\n if self.select_feature == \"all\":\n\n # redefine dict\n self.select_feature = {}\n\n # loop over the feat types and add all the feat_names\n for 
feat_type, feat_names in mapped_data.items():\n self.select_feature[feat_type] = [\n name for name in feat_names]\n\n # if a selection was made\n else:\n\n # we loop over the input dict\n for feat_type, feat_names in self.select_feature.items():\n\n # if for a given type we need all the feature\n if feat_names == 'all':\n if feat_type in mapped_data:\n self.select_feature[feat_type] = list(\n mapped_data[feat_type].keys())\n else:\n self.print_possible_features()\n raise KeyError('Feature type %s not found')\n\n # if we have stored the individual\n # chainA chainB data we need to expand the feature list\n # however when we reload a pretrained model we already\n # come with _chainA, _chainB features.\n # So then we shouldn't add the tags\n else:\n # TODO to refactor this part\n if feat_type not in mapped_data:\n self.print_possible_features()\n raise KeyError('Feature type %s not found')\n\n self.select_feature[feat_type] = []\n\n # loop over all the specified feature names\n for name in feat_names:\n\n # check if there is not _chainA or _chainB in the name\n cond = [tag not in name for tag in chain_tags]\n\n # if there is no chain tag in the name\n if np.all(cond):\n\n # if we have a wild card e.g. PSSM_*\n # we check the matches and add them\n if '*' in name:\n match = name.split('*')[0]\n possible_names = list(\n mapped_data[feat_type].keys())\n match_names = [\n n for n in possible_names\n if n.startswith(match)]\n self.select_feature[feat_type] += match_names\n\n # if we don't have a wild card we append\n # <feature_name>_chainA and <feature_name>_chainB\n # to the list\n else:\n self.select_feature[feat_type] += [\n name + tag for tag in chain_tags]\n # if there is a chain tag in the name\n # (we probably relaod a pretrained model)\n # and we simply append the feaature name\n else:\n self.select_feature[feat_type].append(\n name)\n\n f5.close()", "def DecodeStage():\n\n io = Io({\n 'if_id': Input(if_bundle),\n 'inst': Input(Bits(32)),\n 'stall': Input(Bits(1)),\n 'reg_write': Input(reg_write_bundle),\n 'ras_ctrl': Output(ras_ctrl_bundle),\n 'id_ex': Output(id_ex_bundle),\n 'rs1_data': Output(Bits(C['core-width'])),\n 'rs2_data': Output(Bits(C['core-width'])),\n })\n\n inst = Wire(Bits(32))\n\n with io.if_id.valid:\n inst <<= io.inst\n with otherwise:\n inst <<= 0\n\n regfile = Instance(RegisterFile())\n\n itype = Wire(Bits(ITypes.bitwidth))\n\n regfile.r0_addr <<= Rs1(inst)\n regfile.r0_en <<= ~io.stall\n regfile.r1_addr <<= Rs2(inst)\n regfile.r1_en <<= ~io.stall\n\n regfile.w0_addr <<= io.reg_write.w_addr\n regfile.w0_en <<= io.reg_write.w_en & ~io.stall\n regfile.w0_data <<= io.reg_write.w_data\n\n #\n # inst_data is metadata about the current instruction that is passed through\n # the pipeline unrelated to control signals. It's primary use is for hazard\n # detection and data forwarding.\n #\n\n io.id_ex.ctrl.valid <<= io.if_id.valid\n io.id_ex.ctrl.inst <<= inst\n io.id_ex.ctrl.pc <<= io.if_id.pc\n\n #\n # Hook up the register read outputs.\n #\n\n io.rs1_data <<= regfile.r0_data\n io.rs2_data <<= regfile.r1_data\n\n #\n # Control is a Python function that produces the primary decode logic. It\n # matches against a set of known instructions to produce control signals for\n # later stages in the pipeline. 
The known instructions are encoded in the\n # 'instructions' variable above.\n #\n\n Control(inst, itype, io.id_ex.ctrl)\n\n #\n # TODO: Documentation\n #\n\n HandleRasCtrl(io.ras_ctrl, inst, io.if_id.pc)\n\n #\n # GenerateImmediate produces logic that consume the itype (instruction\n # type, which is R, I, S, B, U, or J) and produces the immediate value for\n # this instruction.\n #\n\n io.id_ex.imm <<= GenerateImmediate(inst, itype)\n\n NameSignals(locals())", "def _make_dijet_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('dijet')\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>djmass)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1eta)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'j1et': ('100', 'inf'),\n 'j2et': ('100', 'inf'),\n 'j1eta': ('0', '320'),\n 'j2eta': ('0', '320'),\n 'djmass': ('1000', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n combgen(\n [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n ]\n \n dijet(\n [(%(djmasslo).0fdjmass)])\n simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)])\n )\"\"\" % argvals", "def _indentity_block(self, X, filters, f, stage, block):\n\t\tconv_layer_name = 'res' + str(stage) + block + '_branch'\n\t\tbn_layer_name = 'bm' + str(stage) + block + '_branch'\n\n\t\tX_shortcut = X\n\n\t\tF1, F2, F3 = filters\n\n\t\t# First component of main path\n\t\tX = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1, 1), padding = 'valid',\n\t\t\tname = conv_layer_name + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n\t\tX = BatchNormalization(axis = 3, name = bn_layer_name + '2a')(X)\n\t\tX = Activation('relu')(X)\n\n\t\t# Second component of main path\n\t\tX = Conv2D(filters = F2, kernel_size = (1, 1), strides = (1, 1), padding = 'same',\n\t\t\tname = conv_layer_name + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n\t\tX = BatchNormalization(axis = 3, name = bn_layer_name + '2b')(X)\n\t\tX = Activation('relu')(X)\n\n\t\t# Third component of main path\n\t\tX = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1, 1), padding = 'valid',\n\t\t\tname = conv_layer_name + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n\t\tX = BatchNormalization(axis = 3, name = bn_layer_name + '2c')(X)\n\n\t\t# Final step : adding the shortcut componet to X and applying 'relu' activation on the combination \n\t\tX = Add()([X_shortcut,X])\n\t\tX = Activation('relu')(X)\n\n\t\treturn X", "def output_fluent(fil,nodes,elems):\n print \"Nodal coordinates\"\n print nodes\n print \"Element connectivity\"\n print elems\n faces = array(Tet4.faces) # Turning faces into an array is important !\n print \"Tetraeder faces\"\n print faces\n elf = elems.take(faces,axis=1)\n # Remark: the shorter syntax elems[faces] 
takes its elements along the\n # axis 0. Then we would need to transpose() first (and probably\n # swap axes again later)\n print \"The faces of the elements:\"\n print elf\n # We need a copy to sort the nodes (sorting is done in-place)\n elfs = elf.copy()\n elfs.sort(axis=2) \n print \"The faces with sorted nodes:\"\n print elfs\n magic = elems.max()+1\n print \"Magic number = %d\" % magic\n code = encode(elfs[:,:,0],elfs[:,:,1],elfs[:,:,2],magic)\n # Remark how nice the encode function works on the whole array\n print \"Encoded faces:\"\n print code\n code = code.ravel()\n print code\n print \"Just A Check:\"\n print \"Element 5 face 2 is %s \" % elf[5,2]\n print \"Element 5 face 2 is %s \" % list(decode(code[4*5+2],magic))\n srt = code.argsort()\n print srt\n print code[srt]\n # Now shipout the faces in this order, removing the doubles\n j = -1 \n for i in srt:\n if j < 0: # no predecessor (or predecessor already shipped)\n j = i\n else:\n e1,f1 = j/4, j%4\n if code[i] == code[j]:\n e2,f2 = i/4, i%4\n j = -1\n else:\n e2 = -1\n j = i\n print \"Face %s belongs to el %s and el %s\" % ( elf[e1,f1], e2, e1 )", "def pre_pipeline(self, results):\n results[\"img_prefix\"] = self.img_prefix\n results[\"seg_prefix\"] = self.seg_prefix\n results[\"proposal_file\"] = self.proposal_file\n results[\"bbox_fields\"] = []\n results[\"mask_fields\"] = []\n results[\"seg_fields\"] = []\n results[\"site_fields\"] = []\n results[\"label_fields\"] = []", "def build_head(self):\n stages = [f'stage{i}' for i in range(1, 7)]\n for stage in stages:\n block = getattr(self.arch, stage)\n PAF, CFM = block.keys()\n PAF = build_blocks(block[PAF], 'head')\n CFM = build_blocks(block[CFM], 'head')\n setattr(self, f\"{stage}_PAF\", PAF)\n setattr(self, f\"{stage}_CFM\", CFM)", "def _make_combinationsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'combinationsTest'\n\n \n\n return \"\"\"\n combgen(\n [(2)(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def generate_code(self, parts: list):\n for i in range(len(parts)):\n\n if not self._involves_this_party(parts[i][0]):\n # not our data, skip job\n continue\n\n if parts[i][1] == \"python\":\n cg = PythonCodeGen(\n self.config,\n parts[i][0],\n f\"{self.config.system_configs['CODEGEN'].workflow_name}-python-job-{i}\"\n )\n cg.generate()\n elif parts[i][1] == \"jiff\":\n cg = JiffCodeGen(\n self.config,\n parts[i][0],\n f\"{self.config.system_configs['CODEGEN'].workflow_name}-jiff-job-{i}\"\n )\n cg.generate()\n else:\n raise Exception(f\"Unrecognized backend from partition: {parts[i][1]}.\")", "def __call__(self, blocks, with_cab=False):\n\n # for k, v in blocks.items():\n # print(k, v.shape)\n\n #down fpn\n f_down = self.FPN_Down_Fusion(blocks)\n # print(\"f_down shape: {}\".format(f_down.shape))\n #up fpn\n f_up = self.FPN_Up_Fusion(blocks)\n # print(\"f_up shape: {}\".format(f_up.shape))\n #fusion\n f_common = fluid.layers.elementwise_add(x=f_down, y=f_up)\n f_common = 
fluid.layers.relu(f_common)\n # print(\"f_common: {}\".format(f_common.shape))\n\n if self.with_cab:\n # print('enhence f_common with CAB.')\n f_common = self.cross_attention(f_common)\n\n f_score, f_border = self.SAST_Header1(f_common)\n f_tvo, f_tco = self.SAST_Header2(f_common)\n\n predicts = OrderedDict()\n predicts['f_score'] = f_score\n predicts['f_border'] = f_border\n predicts['f_tvo'] = f_tvo\n predicts['f_tco'] = f_tco\n return predicts", "def __call__(self, node):\n\n # should throw an error\n if node.cfgInterface == None:\n return\n\n # //\n # // Extract LFN base from included WorkflowSpec parameters\n #//\n if self.unmerged:\n base = node.getParameter(\"UnmergedLFNBase\")[0]\n else:\n base = node.getParameter(\"MergedLFNBase\")[0]\n mergedBase = node.getParameter(\"MergedLFNBase\")[0]\n\n acqEra=None\n if node.hasParameter(\"AcquisitionEra\"):\n acqEra = node.getParameter(\"AcquisitionEra\")[0]\n\n\n # //\n # // iterate over outputmodules/data tiers\n #// Generate LFN, PFN and Catalog for each module\n\n for modName, outModule in node.cfgInterface.outputModules.items():\n\n if ( not outModule.has_key('fileName') ):\n msg = \"OutputModule %s does not contain a fileName entry\" % modName\n raise RuntimeError, msg\n\n preserveLfnGroup = str(self.lfnGroup)\n lastBit = outModule['processedDataset']\n # //\n # // Skip if the file does not stage out. (i.e.\n #// --stageout-intermediates=False)\n if lastBit is None:\n msg = \"OutputModule does not stage out. Skipping.\"\n logging.debug(msg)\n continue\n # //but this guy has the AcquisitionEra at the beginning... delimited\n # // by a dash... we don't need it twice... we try to safely\n #// remove it from the beginning, basically punting if its not\n #\\\\ disadvantage of getting this from the ds name is having to\n # \\\\ then strip off -unmerged\n if acqEra is not None:\n thingtoStrip=\"%s-\" % acqEra\n mypieces = lastBit.split(thingtoStrip, 1)\n if len(mypieces) > 1: \n lastBit = mypieces[1]\n remainingBits = lastBit.split(\"-unmerged\", 1)[0]\n \n outModule['LFNBase'] = os.path.join(base,\n outModule['primaryDataset'],\n outModule['dataTier'],\n remainingBits,\n preserveLfnGroup)\n outModule['MergedLFNBase'] = os.path.join(mergedBase,\n outModule['primaryDataset'],\n outModule['dataTier'],\n remainingBits,\n preserveLfnGroup)\n\n return", "def build_stage2_6(self):\n paf, cfm = self.stage2_6.values()\n for i in range(2, 7):\n paf_ = OrderedDict([(k.replace('i', str(i)),paf[k]) for k in paf])\n cfm_ = OrderedDict([(k.replace('i', str(i)),cfm[k]) for k in cfm])\n stage_ = OrderedDict(PAF=paf_, CFM=cfm_)\n setattr(self, f'stage{i}', stage_)", "def slice_graph_bwd( endea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\t\r\n\tstartnode = slice_node( 0, endea, reg )\t\t# start at the end of the slice node\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\t[tgt_reg, split] = currslice.get_target_reg_bwd()\r\n\t\tprint tgt_reg\r\n\t\tprint split\r\n\t\tif tgt_reg == \"END\":\r\n\t\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and 
\\\r\n\t\t\tcurrslice.startea != currslice.get_lines()[0][0]):\r\n\t\t\t# Do process this node further, nothing really going on \r\n\t\t\tprint \"ZEZ\"\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name() )\r\n\t\telse:\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tif split:\r\n\t\t\t\tfor ref in xrefs:\r\n\t\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\treturn [ graph, data_bib ]", "def identity_block(self,X, f, filters, stage, block):\n # define name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve filters\n f1,f2,f3 = filters\n\n # Save the input value. This needs to be added back to the main path later.\n X_shortcut = X\n\n # First component of the main path\n X = Conv2D(filters= f1, kernel_size=(1,1), strides=(1,1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of the main path\n X = Conv2D(filters= f2, kernel_size=(f,f), strides=(1,1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of the main path\n X = Conv2D(filters= f3, kernel_size=(1,1), strides=(1,1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n # Add the shortcut value to the main path\n X = Add()([X_shortcut, X])\n X = Activation('relu')(X) \n\n return X", "def onBuildModels(self):\n if self.refSeriesNumber != '-1':\n ref = self.refSeriesNumber\n refLongName = self.seriesMap[ref]['LongName']\n labelNodes = slicer.util.getNodes('*'+refLongName+'*-label*')\n\n numNodes = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelHierarchyNode\" )\n outHierarchy = None\n\n for n in xrange(numNodes):\n node = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelHierarchyNode\" )\n if node.GetName() == 'mpReview-'+refLongName:\n outHierarchy = node\n break\n\n # Remove the previous models\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n slicer.mrmlScene.RemoveNode(modelNode)\n\n # if models 
hierarchy does not exist, create it.\n else:\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\n outHierarchy.SetScene( slicer.mrmlScene )\n outHierarchy.SetName( 'mpReview-'+refLongName )\n slicer.mrmlScene.AddNode( outHierarchy )\n\n progress = self.makeProgressIndicator(len(labelNodes))\n step = 0\n for label in labelNodes.values():\n labelName = label.GetName().split(':')[1]\n structureName = labelName[labelName[:-6].rfind(\"-\")+1:-6]\n # Only save labels with known structure names\n if any(structureName in s for s in self.structureNames):\n parameters = {}\n parameters[\"InputVolume\"] = label.GetID()\n parameters['FilterType'] = \"Sinc\"\n parameters['GenerateAll'] = True\n\n parameters[\"JointSmoothing\"] = False\n parameters[\"SplitNormals\"] = True\n parameters[\"PointNormals\"] = True\n parameters[\"SkipUnNamed\"] = True\n\n # create models for all labels\n parameters[\"StartLabel\"] = -1\n parameters[\"EndLabel\"] = -1\n\n parameters[\"Decimate\"] = 0\n parameters[\"Smooth\"] = 0\n\n parameters[\"ModelSceneFile\"] = outHierarchy\n\n progress.labelText = '\\nMaking Model for %s' % structureName\n progress.setValue(step)\n if progress.wasCanceled:\n break\n\n try:\n modelMaker = slicer.modules.modelmaker\n self.CLINode = slicer.cli.run(modelMaker, self.CLINode,\n parameters, wait_for_completion=True)\n except AttributeError:\n qt.QMessageBox.critical(slicer.util.mainWindow(),'Editor', 'The ModelMaker module is not available<p>Perhaps it was disabled in the application settings or did not load correctly.')\n step += 1\n progress.close()\n #\n\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n displayNode = modelNode.GetDisplayNode()\n displayNode.SetSliceIntersectionVisibility(1)\n displayNode.SetSliceIntersectionThickness(2)\n self.modelsVisibilityButton.checked = False\n self.updateViewRenderer()", "def make_label_data(self):\n from xml.etree.ElementTree import Element, SubElement, dump, ElementTree, parse\n\n if not self.graphicsView.hasImage():\n self.showImageSelectionMessageBox()\n return\n\n app_doc_data = AppDocData.instance()\n project = app_doc_data.getCurrentProject()\n\n smalls = []\n bigs = []\n\n symbol_list = app_doc_data.getTargetSymbolList(all=True)\n for symbol in symbol_list:\n if symbol.width and symbol.height:\n if symbol.width > 300 or symbol.height > 300:\n bigs.append(symbol.getName())\n else:\n smalls.append(symbol.getName())\n\n symbols = [item for item in self.graphicsView.scene().items() if issubclass(type(item), SymbolSvgItem)]\n names = [smalls, bigs]\n\n img = app_doc_data.activeDrawing.image_origin\n\n small_size = 500\n big_size = 850\n\n save_path = project.getTrainingSymbolFilePath()\n\n index = 0\n for size in [small_size, big_size]:\n offsets = [0, int(size / 2)]\n\n width, height = img.shape[1], img.shape[0]\n width_count, height_count = width // size + 2, height // size + 2\n b_width, b_height = width_count * size, height_count * size\n b_img = np.zeros((b_height, b_width), np.uint8) + 255\n b_img[:height, :width] = img[:, :]\n\n for offset in offsets:\n for row in range(height_count):\n for col in range(width_count):\n x, y = col * size + offset, row * size + offset\n tile_rect = QRectF(x, y, size, size)\n tile_symbols = []\n for symbol in [symbol for symbol in symbols if symbol.name in names[index]]:\n if tile_rect.contains(symbol.sceneBoundingRect()):\n 
tile_symbols.append(symbol)\n symbols.remove(symbol)\n\n if tile_symbols:\n training_uid = str(uuid.uuid4())\n training_image_path = os.path.join(save_path, training_uid + '.png')\n training_xml_path = os.path.join(save_path, training_uid + '.xml')\n\n # save image\n #_img = b_img[round(tile_rect.top()):round(tile_rect.bottom()),\n # round(tile_rect.left()):round(tile_rect.right())]\n #cv2.imwrite(training_image_path, _img)\n _img = self.graphicsView.image().copy(round(tile_rect.left()), round(tile_rect.top()), round(tile_rect.width()), round(tile_rect.height()))\n _img.save(training_image_path)\n\n # save label\n xml = Element('annotation')\n SubElement(xml, 'folder').text = 'None'\n SubElement(xml, 'filename').text = os.path.basename(save_path)\n\n pathNode = Element('path')\n pathNode.text = save_path.replace('/', '\\\\')\n xml.append(pathNode)\n\n sourceNode = Element('source')\n databaseNode = Element('database')\n databaseNode.text = 'Unknown'\n sourceNode.append(databaseNode)\n xml.append(sourceNode)\n\n sizeNode = Element('size')\n widthNode = Element('width')\n widthNode.text = str(int(tile_rect.width()))\n sizeNode.append(widthNode)\n heightNode = Element('height')\n heightNode.text = str(int(tile_rect.height()))\n sizeNode.append(heightNode)\n depthNode = Element('depth')\n depthNode.text = '3'\n sizeNode.append(depthNode)\n xml.append(sizeNode)\n\n segmentedNode = Element('segmented')\n segmentedNode.text = '0'\n xml.append(segmentedNode)\n\n labelContent = []\n counts = {}\n for item in tile_symbols:\n rect = item.sceneBoundingRect()\n label, xMin, yMin, xMax, yMax = item.name, int(rect.x() - 5 - x), int(rect.y() - 5 - y), int(rect.x() + rect.width() + 5 - x), int(rect.y() + rect.height() + 5 - y)\n xMin = xMin if xMin > 0 else 0\n yMin = yMin if yMin > 0 else 0\n xMax = xMax if xMax < size else size\n yMax = yMax if yMax < size else size\n\n if label == 'None' or label == '':\n continue\n if label not in labelContent:\n labelContent.append(label)\n counts[label] = 1\n else:\n counts[label] = counts[label] + 1\n\n objectNode = Element('object')\n nameNode = Element('name')\n nameNode.text = label\n objectNode.append(nameNode)\n poseNode = Element('pose')\n poseNode.text = 'Unspecified'\n objectNode.append(poseNode)\n truncatedNode = Element('truncated')\n truncatedNode.text = '0'\n objectNode.append(truncatedNode)\n difficultNode = Element('difficult')\n difficultNode.text = '0'\n objectNode.append(difficultNode)\n\n bndboxNode = Element('bndbox')\n xminNode = Element('xmin')\n xminNode.text = str(xMin)\n bndboxNode.append(xminNode)\n yminNode = Element('ymin')\n yminNode.text = str(yMin)\n bndboxNode.append(yminNode)\n xmaxNode = Element('xmax')\n xmaxNode.text = str(xMax)\n bndboxNode.append(xmaxNode)\n ymaxNode = Element('ymax')\n ymaxNode.text = str(yMax)\n bndboxNode.append(ymaxNode)\n objectNode.append(bndboxNode)\n\n xml.append(objectNode)\n\n ElementTree(xml).write(training_xml_path)\n\n index += 1\n\n QMessageBox.about(self, self.tr(\"Notice\"), self.tr('Successfully applied. 
'))", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. 
This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n 
cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )", "def identity_block(X, f, filters, stage, block):\n \n # Defines name basis.\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieves Filters.\n F1, F2, F3 = filters\n \n # Saves the input value. This is needed later to add back to the main path. 
\n X_shortcut = X\n \n ##### MAIN PATH #####\n # First component of main path.\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n \n # Second component of main path.\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n \n # Third component of main path.\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n\n # Final step: Adds shortcut value to main path, and pass it through a RELU activation.\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X", "def identity_block(X, f, filters, stage, block):\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value. You'll need this later to add back to the main path. \n X_shortcut = X\n \n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n # Second component of main path\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = Add()([X,X_shortcut])\n X = Activation('relu')(X)\n \n return X", "def make_mixture_info(parts, operation='+'):\n # type: (List[ModelInfo], str) -> ModelInfo\n # Build new parameter list\n combined_pars = []\n\n # When creating a mixture model that is a sum of product models (ie (1*2)+(3*4))\n # the parameters for models 1 & 2 will be prefixed with A & B respectively,\n # but so will the parameters for models 3 & 4. 
We need to rename models 3 & 4\n # so that they are prefixed with C & D to avoid overlap of parameter names.\n used_prefixes = []\n for part in parts:\n if part.composition and part.composition[0] == 'mixture':\n i = 0\n for submodel in part.composition[1]:\n npars = len(submodel.parameters.kernel_parameters)\n # List of params of one of the constituent models of part\n submodel_pars = part.parameters.kernel_parameters[i:i+npars]\n # Prefix of the constituent model\n prefix = submodel_pars[0].name[0]\n if prefix not in used_prefixes: # Haven't seen this prefix so far\n used_prefixes.append(prefix)\n i += npars\n continue\n # TODO: don't modify submodel --- it may be used elsewhere\n # Existing code probably doesn't keep a handle on the model\n # parts so its probably okay, but it's possible that a mix\n # on user defined mixture models models will change the\n # parameters used for the parts in the GUI. Even worse if the\n # same plugin is used twice. For example, twosphere.py\n # contains sphere+sphere and you create twosphere+twosphere.\n while prefix in used_prefixes:\n # This prefix has been already used, so change it to the\n # next letter that hasn't been used\n prefix = chr(ord(prefix) + 1)\n used_prefixes.append(prefix)\n prefix += \"_\"\n # Update the parameters of this constituent model to use the\n # new prefix\n for par in submodel_pars:\n # Strip {prefix}_ using par.name[2:], etc.\n # TODO: fails for AB_scale\n par.id = prefix + par.id[2:]\n par.name = prefix + par.name[2:]\n if par.length_control is not None:\n par.length_control = prefix + par.length_control[2:]\n i += npars\n\n for part in parts:\n # Parameter prefix per model, A_, B_, ...\n # Note that prefix must also be applied to id and length_control\n # to support vector parameters\n prefix = ''\n if not part.composition or part.composition[0] == 'product':\n # Model isn't a composition model, so its parameters don't have a\n # a prefix. Add the next available prefix\n prefix = chr(ord('A')+len(used_prefixes))\n used_prefixes.append(prefix)\n prefix += '_'\n\n if operation == '+':\n # If model is a sum model, each constituent model gets its own scale parameter\n scale_prefix = prefix\n if prefix == '' and getattr(part, \"operation\", '') == '*':\n # `part` is a composition product model. Find the prefixes of\n # its parameters to form a new prefix for the scale.\n # For example, a model with A*B*C will have ABC_scale.\n sub_prefixes = []\n for param in part.parameters.kernel_parameters:\n # Prefix of constituent model\n sub_prefix = param.id.split('_')[0]\n if sub_prefix not in sub_prefixes:\n sub_prefixes.append(sub_prefix)\n # Concatenate sub_prefixes to form prefix for the scale\n scale_prefix = ''.join(sub_prefixes) + '_'\n scale = Parameter(scale_prefix + 'scale', default=1.0,\n description=\"model intensity for \" + part.name)\n combined_pars.append(scale)\n for p in part.parameters.kernel_parameters:\n p = copy(p)\n p.name = prefix + p.name\n p.id = prefix + p.id\n if p.length_control is not None:\n p.length_control = prefix + p.length_control\n combined_pars.append(p)\n parameters = ParameterTable(combined_pars)\n # Allow for the scenario in which each component has all its PD parameters\n # active simultaneously. 
details.make_details() will throw an error if\n # too many are used from any one component.\n parameters.max_pd = sum(part.parameters.max_pd for part in parts)\n\n def random():\n \"\"\"Random set of model parameters for mixture model\"\"\"\n combined_pars = {}\n for k, part in enumerate(parts):\n prefix = chr(ord('A')+k) + '_'\n pars = part.random()\n combined_pars.update((prefix+k, v) for k, v in pars.items())\n return combined_pars\n\n model_info = ModelInfo()\n model_info.id = operation.join(part.id for part in parts)\n model_info.operation = operation\n model_info.name = '(' + operation.join(part.name for part in parts) + ')'\n model_info.filename = None\n model_info.title = 'Mixture model with ' + model_info.name\n model_info.description = model_info.title\n model_info.docs = model_info.title\n model_info.category = \"custom\"\n model_info.parameters = parameters\n model_info.random = random\n #model_info.single = any(part['single'] for part in parts)\n model_info.structure_factor = False\n #model_info.tests = []\n #model_info.source = []\n # Remember the component info blocks so we can build the model\n model_info.composition = ('mixture', parts)\n return model_info", "def tied_featurize(batch, device, chain_dict, fixed_position_dict=None, omit_AA_dict=None, tied_positions_dict=None, pssm_dict=None, bias_by_res_dict=None, ca_only=False):\n alphabet = 'ACDEFGHIKLMNPQRSTVWYX'\n B = len(batch)\n lengths = np.array([len(b['seq']) for b in batch], dtype=np.int32) #sum of chain seq lengths\n L_max = max([len(b['seq']) for b in batch])\n if ca_only:\n X = np.zeros([B, L_max, 1, 3])\n else:\n X = np.zeros([B, L_max, 4, 3])\n residue_idx = -100*np.ones([B, L_max], dtype=np.int32)\n chain_M = np.zeros([B, L_max], dtype=np.int32) #1.0 for the bits that need to be predicted\n pssm_coef_all = np.zeros([B, L_max], dtype=np.float32) #1.0 for the bits that need to be predicted\n pssm_bias_all = np.zeros([B, L_max, 21], dtype=np.float32) #1.0 for the bits that need to be predicted\n pssm_log_odds_all = 10000.0*np.ones([B, L_max, 21], dtype=np.float32) #1.0 for the bits that need to be predicted\n chain_M_pos = np.zeros([B, L_max], dtype=np.int32) #1.0 for the bits that need to be predicted\n bias_by_res_all = np.zeros([B, L_max, 21], dtype=np.float32)\n chain_encoding_all = np.zeros([B, L_max], dtype=np.int32) #1.0 for the bits that need to be predicted\n S = np.zeros([B, L_max], dtype=np.int32)\n omit_AA_mask = np.zeros([B, L_max, len(alphabet)], dtype=np.int32)\n # Build the batch\n letter_list_list = []\n visible_list_list = []\n masked_list_list = []\n masked_chain_length_list_list = []\n tied_pos_list_of_lists_list = []\n for i, b in enumerate(batch):\n if chain_dict != None:\n masked_chains, visible_chains = chain_dict[b['name']] #masked_chains a list of chain letters to predict [A, D, F]\n else:\n masked_chains = [item[-1:] for item in list(b) if item[:10]=='seq_chain_']\n visible_chains = []\n masked_chains.sort() #sort masked_chains \n visible_chains.sort() #sort visible_chains \n all_chains = masked_chains + visible_chains\n for i, b in enumerate(batch):\n mask_dict = {}\n a = 0\n x_chain_list = []\n chain_mask_list = []\n chain_seq_list = []\n chain_encoding_list = []\n c = 1\n letter_list = []\n global_idx_start_list = [0]\n visible_list = []\n masked_list = []\n masked_chain_length_list = []\n fixed_position_mask_list = []\n omit_AA_mask_list = []\n pssm_coef_list = []\n pssm_bias_list = []\n pssm_log_odds_list = []\n bias_by_res_list = []\n l0 = 0\n l1 = 0\n for step, letter in 
enumerate(all_chains):\n if letter in visible_chains:\n letter_list.append(letter)\n visible_list.append(letter)\n chain_seq = b[f'seq_chain_{letter}']\n chain_seq = ''.join([a if a!='-' else 'X' for a in chain_seq])\n chain_length = len(chain_seq)\n global_idx_start_list.append(global_idx_start_list[-1]+chain_length)\n chain_coords = b[f'coords_chain_{letter}'] #this is a dictionary\n chain_mask = np.zeros(chain_length) #0.0 for visible chains\n if ca_only:\n x_chain = np.array(chain_coords[f'CA_chain_{letter}']) #[chain_lenght,1,3] #CA_diff\n if len(x_chain.shape) == 2:\n x_chain = x_chain[:,None,:]\n else:\n x_chain = np.stack([chain_coords[c] for c in [f'N_chain_{letter}', f'CA_chain_{letter}', f'C_chain_{letter}', f'O_chain_{letter}']], 1) #[chain_lenght,4,3]\n x_chain_list.append(x_chain)\n chain_mask_list.append(chain_mask)\n chain_seq_list.append(chain_seq)\n chain_encoding_list.append(c*np.ones(np.array(chain_mask).shape[0]))\n l1 += chain_length\n residue_idx[i, l0:l1] = 100*(c-1)+np.arange(l0, l1)\n l0 += chain_length\n c+=1\n fixed_position_mask = np.ones(chain_length)\n fixed_position_mask_list.append(fixed_position_mask)\n omit_AA_mask_temp = np.zeros([chain_length, len(alphabet)], np.int32)\n omit_AA_mask_list.append(omit_AA_mask_temp)\n pssm_coef = np.zeros(chain_length)\n pssm_bias = np.zeros([chain_length, 21])\n pssm_log_odds = 10000.0*np.ones([chain_length, 21])\n pssm_coef_list.append(pssm_coef)\n pssm_bias_list.append(pssm_bias)\n pssm_log_odds_list.append(pssm_log_odds)\n bias_by_res_list.append(np.zeros([chain_length, 21]))\n if letter in masked_chains:\n masked_list.append(letter)\n letter_list.append(letter)\n chain_seq = b[f'seq_chain_{letter}']\n chain_seq = ''.join([a if a!='-' else 'X' for a in chain_seq])\n chain_length = len(chain_seq)\n global_idx_start_list.append(global_idx_start_list[-1]+chain_length)\n masked_chain_length_list.append(chain_length)\n chain_coords = b[f'coords_chain_{letter}'] #this is a dictionary\n chain_mask = np.ones(chain_length) #1.0 for masked\n if ca_only:\n x_chain = np.array(chain_coords[f'CA_chain_{letter}']) #[chain_lenght,1,3] #CA_diff\n if len(x_chain.shape) == 2:\n x_chain = x_chain[:,None,:]\n else:\n x_chain = np.stack([chain_coords[c] for c in [f'N_chain_{letter}', f'CA_chain_{letter}', f'C_chain_{letter}', f'O_chain_{letter}']], 1) #[chain_lenght,4,3] \n x_chain_list.append(x_chain)\n chain_mask_list.append(chain_mask)\n chain_seq_list.append(chain_seq)\n chain_encoding_list.append(c*np.ones(np.array(chain_mask).shape[0]))\n l1 += chain_length\n residue_idx[i, l0:l1] = 100*(c-1)+np.arange(l0, l1)\n l0 += chain_length\n c+=1\n fixed_position_mask = np.ones(chain_length)\n if fixed_position_dict!=None:\n fixed_pos_list = fixed_position_dict[b['name']][letter]\n if fixed_pos_list:\n fixed_position_mask[np.array(fixed_pos_list)-1] = 0.0\n fixed_position_mask_list.append(fixed_position_mask)\n omit_AA_mask_temp = np.zeros([chain_length, len(alphabet)], np.int32)\n if omit_AA_dict!=None:\n for item in omit_AA_dict[b['name']][letter]:\n idx_AA = np.array(item[0])-1\n AA_idx = np.array([np.argwhere(np.array(list(alphabet))== AA)[0][0] for AA in item[1]]).repeat(idx_AA.shape[0])\n idx_ = np.array([[a, b] for a in idx_AA for b in AA_idx])\n omit_AA_mask_temp[idx_[:,0], idx_[:,1]] = 1\n omit_AA_mask_list.append(omit_AA_mask_temp)\n pssm_coef = np.zeros(chain_length)\n pssm_bias = np.zeros([chain_length, 21])\n pssm_log_odds = 10000.0*np.ones([chain_length, 21])\n if pssm_dict:\n if pssm_dict[b['name']][letter]:\n pssm_coef = 
pssm_dict[b['name']][letter]['pssm_coef']\n pssm_bias = pssm_dict[b['name']][letter]['pssm_bias']\n pssm_log_odds = pssm_dict[b['name']][letter]['pssm_log_odds']\n pssm_coef_list.append(pssm_coef)\n pssm_bias_list.append(pssm_bias)\n pssm_log_odds_list.append(pssm_log_odds)\n if bias_by_res_dict:\n bias_by_res_list.append(bias_by_res_dict[b['name']][letter])\n else:\n bias_by_res_list.append(np.zeros([chain_length, 21]))\n\n \n letter_list_np = np.array(letter_list)\n tied_pos_list_of_lists = []\n tied_beta = np.ones(L_max)\n if tied_positions_dict!=None:\n tied_pos_list = tied_positions_dict[b['name']]\n if tied_pos_list:\n set_chains_tied = set(list(itertools.chain(*[list(item) for item in tied_pos_list])))\n for tied_item in tied_pos_list:\n one_list = []\n for k, v in tied_item.items():\n start_idx = global_idx_start_list[np.argwhere(letter_list_np == k)[0][0]]\n if isinstance(v[0], list):\n for v_count in range(len(v[0])):\n one_list.append(start_idx+v[0][v_count]-1)#make 0 to be the first\n tied_beta[start_idx+v[0][v_count]-1] = v[1][v_count]\n else:\n for v_ in v:\n one_list.append(start_idx+v_-1)#make 0 to be the first\n tied_pos_list_of_lists.append(one_list)\n tied_pos_list_of_lists_list.append(tied_pos_list_of_lists)\n\n\n \n x = np.concatenate(x_chain_list,0) #[L, 4, 3]\n all_sequence = \"\".join(chain_seq_list)\n m = np.concatenate(chain_mask_list,0) #[L,], 1.0 for places that need to be predicted\n chain_encoding = np.concatenate(chain_encoding_list,0)\n m_pos = np.concatenate(fixed_position_mask_list,0) #[L,], 1.0 for places that need to be predicted\n\n pssm_coef_ = np.concatenate(pssm_coef_list,0) #[L,], 1.0 for places that need to be predicted\n pssm_bias_ = np.concatenate(pssm_bias_list,0) #[L,], 1.0 for places that need to be predicted\n pssm_log_odds_ = np.concatenate(pssm_log_odds_list,0) #[L,], 1.0 for places that need to be predicted\n\n bias_by_res_ = np.concatenate(bias_by_res_list, 0) #[L,21], 0.0 for places where AA frequencies don't need to be tweaked\n\n l = len(all_sequence)\n x_pad = np.pad(x, [[0,L_max-l], [0,0], [0,0]], 'constant', constant_values=(np.nan, ))\n X[i,:,:,:] = x_pad\n\n m_pad = np.pad(m, [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n m_pos_pad = np.pad(m_pos, [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n omit_AA_mask_pad = np.pad(np.concatenate(omit_AA_mask_list,0), [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n chain_M[i,:] = m_pad\n chain_M_pos[i,:] = m_pos_pad\n omit_AA_mask[i,] = omit_AA_mask_pad\n\n chain_encoding_pad = np.pad(chain_encoding, [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n chain_encoding_all[i,:] = chain_encoding_pad\n\n pssm_coef_pad = np.pad(pssm_coef_, [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n pssm_bias_pad = np.pad(pssm_bias_, [[0,L_max-l], [0,0]], 'constant', constant_values=(0.0, ))\n pssm_log_odds_pad = np.pad(pssm_log_odds_, [[0,L_max-l], [0,0]], 'constant', constant_values=(0.0, ))\n\n pssm_coef_all[i,:] = pssm_coef_pad\n pssm_bias_all[i,:] = pssm_bias_pad\n pssm_log_odds_all[i,:] = pssm_log_odds_pad\n\n bias_by_res_pad = np.pad(bias_by_res_, [[0,L_max-l], [0,0]], 'constant', constant_values=(0.0, ))\n bias_by_res_all[i,:] = bias_by_res_pad\n\n # Convert to labels\n indices = np.asarray([alphabet.index(a) for a in all_sequence], dtype=np.int32)\n S[i, :l] = indices\n letter_list_list.append(letter_list)\n visible_list_list.append(visible_list)\n masked_list_list.append(masked_list)\n masked_chain_length_list_list.append(masked_chain_length_list)\n\n\n isnan = np.isnan(X)\n 
mask = np.isfinite(np.sum(X,(2,3))).astype(np.float32)\n X[isnan] = 0.\n\n # Conversion\n pssm_coef_all = torch.from_numpy(pssm_coef_all).to(dtype=torch.float32, device=device)\n pssm_bias_all = torch.from_numpy(pssm_bias_all).to(dtype=torch.float32, device=device)\n pssm_log_odds_all = torch.from_numpy(pssm_log_odds_all).to(dtype=torch.float32, device=device)\n\n tied_beta = torch.from_numpy(tied_beta).to(dtype=torch.float32, device=device)\n\n jumps = ((residue_idx[:,1:]-residue_idx[:,:-1])==1).astype(np.float32)\n bias_by_res_all = torch.from_numpy(bias_by_res_all).to(dtype=torch.float32, device=device)\n phi_mask = np.pad(jumps, [[0,0],[1,0]])\n psi_mask = np.pad(jumps, [[0,0],[0,1]])\n omega_mask = np.pad(jumps, [[0,0],[0,1]])\n dihedral_mask = np.concatenate([phi_mask[:,:,None], psi_mask[:,:,None], omega_mask[:,:,None]], -1) #[B,L,3]\n dihedral_mask = torch.from_numpy(dihedral_mask).to(dtype=torch.float32, device=device)\n residue_idx = torch.from_numpy(residue_idx).to(dtype=torch.long,device=device)\n S = torch.from_numpy(S).to(dtype=torch.long,device=device)\n X = torch.from_numpy(X).to(dtype=torch.float32, device=device)\n mask = torch.from_numpy(mask).to(dtype=torch.float32, device=device)\n chain_M = torch.from_numpy(chain_M).to(dtype=torch.float32, device=device)\n chain_M_pos = torch.from_numpy(chain_M_pos).to(dtype=torch.float32, device=device)\n omit_AA_mask = torch.from_numpy(omit_AA_mask).to(dtype=torch.float32, device=device)\n chain_encoding_all = torch.from_numpy(chain_encoding_all).to(dtype=torch.long, device=device)\n if ca_only:\n X_out = X[:,:,0]\n else:\n X_out = X\n return X_out, S, mask, lengths, chain_M, chain_encoding_all, letter_list_list, visible_list_list, masked_list_list, masked_chain_length_list_list, chain_M_pos, omit_AA_mask, residue_idx, dihedral_mask, tied_pos_list_of_lists_list, pssm_coef_all, pssm_bias_all, pssm_log_odds_all, bias_by_res_all, tied_beta", "def compiler_output(input_ckt, hier_graph_dict, design_name:str, result_dir:pathlib.Path, pdk_dir:pathlib.Path, uniform_height=False):\n layers_json = pdk_dir / 'layers.json'\n with open(layers_json,\"rt\") as fp:\n pdk_data=json.load(fp)\n design_config = pdk_data[\"design_info\"]\n\n if not result_dir.exists():\n result_dir.mkdir()\n logger.debug(f\"Writing results in dir: {result_dir} {hier_graph_dict}\")\n input_dir = input_ckt.parents[0]\n\n verilog_tbl = { 'modules': [], 'global_signals': []}\n\n design_setup = read_setup(input_dir / (design_name + '.setup'))\n try:\n POWER_PINS = [design_setup['GND'][0],design_setup['POWER'][0]]\n except (IndexError, ValueError):\n POWER_PINS = []\n logger.info(\"Power and ground nets not found. 
Power grid will not be constructed.\")\n\n #read lef to not write those modules as macros\n lef_path = pathlib.Path(__file__).resolve().parent.parent / 'config'\n all_lef = read_lef(lef_path)\n logger.debug(f\"Available library cells: {', '.join(all_lef)}\")\n\n primitives = {}\n for name,member in hier_graph_dict.items():\n logger.debug(f\"Found module: {name} {member['graph'].nodes()}\")\n graph = member[\"graph\"]\n constraints = member[\"constraints\"]\n\n for const in constraints:\n if isinstance(const, constraint.GuardRing):\n primitives['guard_ring'] = {'primitive':'guard_ring'}\n\n logger.debug(f\"Reading nodes from graph: {name}\")\n for node, attr in graph.nodes(data=True):\n if 'net' in attr['inst_type']: continue\n #Dropping floating ports\n lef_name = attr['inst_type']\n\n if \"values\" in attr and (lef_name in all_lef):\n block_name, block_args = generate_lef(lef_name, attr, primitives, design_config, uniform_height)\n #block_name_ext = block_name.replace(lef_name,'')\n logger.debug(f\"Created new lef for: {block_name} {lef_name}\")\n #Multiple instances of same module\n if 'inst_copy' in attr:\n for nm in list(hier_graph_dict.keys()):\n if nm == lef_name + attr['inst_copy']:\n if block_name not in hier_graph_dict.keys():\n logger.debug('Trying to modify a dictionary while iterating over it!')\n hier_graph_dict[block_name] = hier_graph_dict.pop(nm)\n else:\n #For cells with extra parameters than current primitive naming convention\n all_lef.append(nm)\n graph.nodes[node][\"inst_type\"] = block_name\n all_lef.append(block_name)\n\n # Only unit caps are generated\n if block_name.lower().startswith('cap'):\n graph.nodes[node]['inst_type'] = block_args['primitive']\n block_args['primitive'] = block_name\n else:\n graph.nodes[node]['inst_type'] = block_name\n\n if block_name in primitives:\n if block_args != primitives[block_name]:\n logging.warning(f\"two different primitve {block_name} of size {primitives[block_name]} {block_args}got approximated to same unit size\")\n else:\n primitives[block_name] = block_args\n elif \"values\" in attr and 'inst_copy' in attr:\n member[\"graph\"].nodes[node][\"inst_type\"]= lef_name + attr[\"inst_copy\"]\n all_lef.append(block_name)\n\n else:\n logger.debug(f\"No physical information found for: {name}\")\n logger.debug(f\"generated data for {name} : {pprint.pformat(primitives, indent=4)}\")\n logger.debug(f\"All available cell generator with updates: {all_lef}\")\n for name,member in hier_graph_dict.items():\n graph = member[\"graph\"]\n logger.debug(f\"Found module: {name} {graph.nodes()}\")\n inoutpin = []\n floating_ports=[]\n if \"ports_match\" in member and member[\"ports_match\"]:\n for key in member[\"ports_match\"].keys():\n if key not in POWER_PINS:\n inoutpin.append(key)\n if member[\"ports\"]:\n logger.debug(f'Found module ports: {member[\"ports\"]} {member[\"name\"]}')\n floating_ports = set(inoutpin) - set(member[\"ports\"]) - set(design_setup['POWER']) -set(design_setup['GND'])\n if len(list(floating_ports))> 0:\n logger.error(f\"floating ports found: {name} {floating_ports}\")\n raise SystemExit('Please remove floating ports')\n else:\n inoutpin = member[\"ports\"]\n if name not in all_lef:\n\n ## Removing constraints to fix cascoded cmc\n if name not in design_setup['DIGITAL']:\n logger.debug(f\"call constraint generator writer for block: {name}\")\n stop_points = design_setup['POWER'] + design_setup['GND'] + design_setup['CLOCK']\n constraints = member[\"constraints\"]\n if name not in design_setup['NO_CONST']:\n constraints = 
FindConst(graph, name, inoutpin, member[\"ports_weight\"], constraints, stop_points)\n constraints = CapConst(graph, name, design_config[\"unit_size_cap\"], constraints, design_setup['MERGE_SYMM_CAPS'])\n hier_graph_dict[name] = hier_graph_dict[name].copy(\n update={'constraints': constraints}\n )\n ## Write out modified netlist & constraints as JSON\n logger.debug(f\"call verilog writer for block: {name}\")\n wv = WriteVerilog(name, inoutpin, hier_graph_dict, POWER_PINS)\n verilog_tbl['modules'].append( wv.gen_dict())\n if len(POWER_PINS)>0:\n for i, nm in enumerate(POWER_PINS):\n verilog_tbl['global_signals'].append( { 'prefix' :'global_power', 'formal' : f'supply{i}', 'actual' : nm})\n\n with (result_dir / f'{design_name}.verilog.json').open( 'wt') as fp:\n json.dump( verilog_tbl, fp=fp, indent=2)\n\n with (result_dir / f'{design_name}.v').open( 'wt') as fp:\n write_verilog( verilog_tbl, fp)\n\n logger.info(\"Completed topology identification.\")\n logger.debug(f\"OUTPUT verilog json netlist at: {result_dir}/{design_name}.verilog.json\")\n logger.debug(f\"OUTPUT verilog netlist at: {result_dir}/{design_name}.v\")\n logger.debug(f\"OUTPUT const file at: {result_dir}/{design_name}.pnr.const.json\")\n return primitives" ]
[ "0.53284836", "0.52070326", "0.50032526", "0.4924041", "0.48997957", "0.48227805", "0.4819547", "0.4782699", "0.47695082", "0.4746857", "0.4743185", "0.47308764", "0.4726845", "0.46611047", "0.46597615", "0.463954", "0.46242067", "0.46234703", "0.45942166", "0.45858887", "0.4571889", "0.45699766", "0.45352846", "0.45307684", "0.45059124", "0.44992357", "0.4491267", "0.44876143", "0.44860741", "0.4449634" ]
0.61593163
0
dijet label. supports dijet cuts, and cuts on participating jets
def _make_dijet_label(chain_parts):
    assert len(chain_parts) == 1
    scenario = chain_parts[0]['hypoScenario']
    assert scenario.startswith('dijet')

    arg_res = [
        re.compile(r'^(?P<lo>\d*)(?P<key>djmass)(?P<hi>\d*)$'),
        re.compile(r'^(?P<lo>\d*)(?P<key>j1et)(?P<hi>\d*)$'),
        re.compile(r'^(?P<lo>\d*)(?P<key>j1eta)(?P<hi>\d*)$'),
        re.compile(r'^(?P<lo>\d*)(?P<key>j2et)(?P<hi>\d*)$'),
        re.compile(r'^(?P<lo>\d*)(?P<key>j2eta)(?P<hi>\d*)$'),
    ]

    defaults = {
        'j1et': ('100', 'inf'),
        'j2et': ('100', 'inf'),
        'j1eta': ('0', '320'),
        'j2eta': ('0', '320'),
        'djmass': ('1000', 'inf'),
    }

    args = _args_from_scenario(scenario)
    argvals = {}
    while args:
        assert len(args) == len(arg_res)
        arg = args.pop()
        for r in arg_res:
            m = r.match(arg)
            if m is not None:
                arg_res.remove(r)
                gd = m.groupdict()
                key = gd['key']
                try:
                    lo = float(gd['lo'])
                except ValueError:
                    lo = defaults[key][0]
                argvals[key+'lo'] = lo
                try:
                    hi = float(gd['hi'])
                except ValueError:
                    hi = defaults[key][1]
                argvals[key+'hi'] = hi

    assert len(args) == len(arg_res)
    assert len(args) == 0

    return """
    combgen(
            [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)
                 (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)
            ]
            dijet(
                  [(%(djmasslo).0fdjmass)])
            simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)
                    (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)])
            )""" % argvals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_info(config, cut, label):\n cfg = filter(lambda c: c['name'] == cut, config['physics']['cuts'])[0]\n text = \"\"\n if 'max' not in cfg:\n text += \"#geq \"\n text += str(cfg['min'])\n if 'max' in cfg and cfg['max'] != cfg['min']:\n text += '-' + str(cfg['max']) + ' ' + label + 's'\n elif cfg['min'] != 1:\n text += ' ' + label + 's'\n else:\n text += ' ' + label\n return text", "def makeDPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n _Kcuts1 = \"~ISMUON & (PT > %(DaugPtLoose)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2Loose)s)\" % locals()['config']\n _KcutsPIDK = \" & (PIDK > %(HighPIDK)s)\" % locals()['config']\n _Kcuts2 = \" & (ISLONG) & (P > %(DaugPLoose)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2Loose)s)\" % locals()['config']\n _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2\n _Picuts1 = \"~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)\" % locals()['config']\n _PicutsPIDK = \" & (PIDK < %(LowPIDK)s)\" % locals()['config']\n _Picuts2 = \" & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)\" % locals()['config']\n _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2\n _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts }\n #_Kcuts1 = \"~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)\"\n #_KcutsPIDK = \" & (PIDK > 5)\"\n #_Kcuts2 = \" & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)\"\n #_Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2\n #_Picuts1 = \"~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)\"\n #_PicutsPIDK = \" & (PIDK < 0)\"\n #_Picuts2 = \" & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)\"\n #_Picuts = _Picuts1 + _PicutsPIDK + _Picuts2\n #_dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts }\n\n _combCuts = \"(APT > %(D0PtLoose)s* MeV)\" \\\n \"& (AP > %(D0P)s* MeV)\" % locals()['config']\n\n _motherCuts = \"(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)\" \\\n \"& (BPVVDCHI2 > %(D0FDChi2)s)\" % locals()['config']\n\n\n _Dminus = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = _dauCuts\n , CombinationCut = _combCuts\n , MotherCut = _motherCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dminus,\n RequiredSelections = inputSel\n )", "def x_group_label(\n x_gr: int, cut: int = 20, name_dict: Dict[AnyStr, AnyStr] = names_dict\n) -> AnyStr:\n name = name_dict[str(x_gr)]\n if len(name) > cut:\n return f\"{name[:cut-3]}...\"\n else:\n return name", "def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')", "def __init__(self, data_cfg, pipeline_cfg, root_path, sel_index=0):\n\n super(DetRetailOneDataset, self).__init__(\n data_cfg, pipeline_cfg, root_path, sel_index\n )\n\n self.cat2label = {cat: i for i, cat in enumerate(self.class_names)}\n self.ORI_CLASSES = (\n \"asamu\",\n \"baishikele\",\n \"baokuangli\",\n \"aoliao\",\n \"bingqilinniunai\",\n \"chapai\",\n \"fenda\",\n \"guolicheng\",\n \"haoliyou\",\n \"heweidao\",\n \"hongniu\",\n \"hongniu2\",\n \"hongshaoniurou\",\n \"kafei\",\n \"kaomo_gali\",\n \"kaomo_jiaoyan\",\n \"kaomo_shaokao\",\n \"kaomo_xiangcon\",\n \"kele\",\n \"laotansuancai\",\n \"liaomian\",\n 
\"lingdukele\",\n \"maidong\",\n \"mangguoxiaolao\",\n \"moliqingcha\",\n \"niunai\",\n \"qinningshui\",\n \"quchenshixiangcao\",\n \"rousongbing\",\n \"suanlafen\",\n \"tangdaren\",\n \"wangzainiunai\",\n \"weic\",\n \"weitanai\",\n \"weitaningmeng\",\n \"wulongcha\",\n \"xuebi\",\n \"xuebi2\",\n \"yingyangkuaixian\",\n \"yuanqishui\",\n \"xuebi-b\",\n \"kebike\",\n \"tangdaren3\",\n \"chacui\",\n \"heweidao2\",\n \"youyanggudong\",\n \"baishikele-2\",\n \"heweidao3\",\n \"yibao\",\n \"kele-b\",\n \"AD\",\n \"jianjiao\",\n \"yezhi\",\n \"libaojian\",\n \"nongfushanquan\",\n \"weitanaiditang\",\n \"ufo\",\n \"zihaiguo\",\n \"nfc\",\n \"yitengyuan\",\n \"xianglaniurou\",\n \"gudasao\",\n \"buding\",\n \"ufo2\",\n \"damaicha\",\n \"chapai2\",\n \"tangdaren2\",\n \"suanlaniurou\",\n \"bingtangxueli\",\n \"weitaningmeng-bottle\",\n \"liziyuan\",\n \"yousuanru\",\n \"rancha-1\",\n \"rancha-2\",\n \"wanglaoji\",\n \"weitanai2\",\n \"qingdaowangzi-1\",\n \"qingdaowangzi-2\",\n \"binghongcha\",\n \"aerbeisi\",\n \"lujikafei\",\n \"kele-b-2\",\n \"anmuxi\",\n \"xianguolao\",\n \"haitai\",\n \"youlemei\",\n \"weiweidounai\",\n \"jindian\",\n \"3jia2\",\n \"meiniye\",\n \"rusuanjunqishui\",\n \"taipingshuda\",\n \"yida\",\n \"haochidian\",\n \"wuhounaicha\",\n \"baicha\",\n \"lingdukele-b\",\n \"jianlibao\",\n \"lujiaoxiang\",\n \"3+2-2\",\n \"luxiangniurou\",\n \"dongpeng\",\n \"dongpeng-b\",\n \"xianxiayuban\",\n \"niudufen\",\n \"zaocanmofang\",\n \"wanglaoji-c\",\n \"mengniu\",\n \"mengniuzaocan\",\n \"guolicheng2\",\n \"daofandian1\",\n \"daofandian2\",\n \"daofandian3\",\n \"daofandian4\",\n \"yingyingquqi\",\n \"lefuqiu\",\n )", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. 
Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def create_labelled_dataset(self):\n\n print(\"-------------------------------------------------------------------\")\n print(\" How to Use the Pole Hull Label Tool\")\n print(\"-------------------------------------------------------------------\")\n print(\"- If a hull is NOT associated to a pole: press the 1 button\")\n print(\"- If a hull IS associated to a pole: press the 2 button\")\n print(\"\\n- If any other key is pressed, the program EXITS\")\n print(\"-------------------------------------------------------------------\")\n\n detector = gate_detector.GateDetector(im_resize=3.0/4)\n\n imgs = []\n labels = []\n directory = os.path.dirname(os.getcwd())\n \n # Get absolute path of all images in the images folder\n for dirpath,_,filenames in os.walk(os.path.join(directory, 'images', 'gate')):\n for f in filenames:\n imgs.append(os.path.abspath(os.path.join(dirpath, f)))\n\n # Get the hulls from the segmented image and run the display and label program for each image\n for img in imgs:\n src = cv.imread(img, 1)\n pre = detector.preprocess(src)\n seg = detector.segment(pre)\n mor = detector.morphological(seg)\n hulls = detector.create_convex_hulls(seg)\n labels += self.display_and_label_hulls(hulls, pre)\n return labels", "def setContourLabels(mode='none', ndigits=1):\n odict = {'none':'NONE', 'float':'FLOAT', 'string':'CONLAB'}\n dislin.labdig(ndigits, 'CONTUR')\n dislin.labels(odict[mode], 'CONTUR')", "def _add_labels(self):\n coords = self['pore.coords']\n self['pore.front'] = coords[:,0]<(0.1*self._Lx)\n self['pore.back'] = coords[:,0]>(0.9*self._Lx)\n self['pore.left'] = coords[:,1]<(0.1*self._Ly)\n self['pore.right'] = coords[:,1]>(0.9*self._Ly)\n self['pore.bottom'] = coords[:,2]<(0.1*self._Lz)\n self['pore.top'] = coords[:,2]>(0.9*self._Lz)\n bnds = self.pores(labels=['front','back','left','right','bottom','top'])\n self['pore.boundary'] = False\n self['pore.boundary'] = bnds", "def plot_data_assemble(self,kwargs_seg, add_mask ,img_name='data.pdf',cutout_text='lensed image',font_size=28):\n mask = self.data_mask\n image = self.raw_image\n picked_data = self.data\n selem = 
np.ones((add_mask, add_mask))\n img_mask = ndimage.binary_dilation(mask.astype(np.bool), selem)\n fig, (ax1, ax2, ax3,ax4) = plt.subplots(1, 4, figsize=(19, 10))\n ax1.imshow(image, origin='lower', cmap=\"gist_heat\")\n ax1.set_title('Cutout Image',fontsize =font_size)\n ax1.text(image.shape[0] * 0.2, image.shape[0] * 0.05, cutout_text,size=20, color='white',weight=\"bold\")\n ax1.axis('off')\n segments_deblend_list, xcenter, ycenter, c_index=kwargs_seg\n ax2.imshow(segments_deblend_list, origin='lower')\n for i in range(len(xcenter)):\n ax2.text(xcenter[i] * 1.1, ycenter[i], 'Seg' + repr(i), size=20,color='w',weight=\"bold\")\n ax2.text(image.shape[0] * 0.2, image.shape[0] * 0.9, 'Seg' + repr(c_index) + ' ' + 'in center',\n size=20, color='white',weight=\"bold\")\n ax2.set_title('Segmentations',fontsize =font_size)\n ax2.axis('off')\n ax3.imshow(img_mask+mask, origin='lower',cmap=\"gist_heat\")\n ax3.set_title('Selected pixels',fontsize =font_size)\n ax3.text(image.shape[0] * 0.1, image.shape[0] * 0.05, 'pixels (S/N >' + repr(self.snr) + ')',size=20, color='white',weight=\"bold\")\n ax3.text(image.shape[0] * 0.1, image.shape[0] * 0.9, 'additional pixels', size=20, color='r',weight=\"bold\")\n ax3.axis('off')\n ax4.imshow(picked_data, origin='lower',cmap=\"gist_heat\")\n ax4.set_title('Processed Image',fontsize =font_size)\n ax4.axis('off')\n plt.show()\n fig.savefig(img_name)\n return 0", "def BaseLabel(self, *args):\n return _XCAFDoc.XCAFDoc_DimTolTool_BaseLabel(self, *args)", "def main():\n # Directory where the DICOM files are being stored (in this\n input_path = './Inputs/valve'\n\n # Original image from the filepath\n img_original = read_image(input_path)\n\n # Image with smoothing applied to reduce noise\n img_smooth = sitk.CurvatureFlow(image1=img_original, timeStep=0.125, numberOfIterations=10)\n\n # Create labels on our smoothed image for cardiac tissue and tissue with blood\n labels_tissue = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=325, upperThreshold=470, insideValue=1)\n labels_blood = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=450, upperThreshold=800, insideValue=1, outsideValue=0)\n\n # IMPORTANT STEP: essentially, this is the key to our algorithm. By finding the \"blood\" without cardiac tissue,\n # and then using binary hole filling with a fairly large radius, we are able to label a lot of the mitral valve\n # area without labeling too much of the other cardiac tissue. 
Thus, THIS is what lets us single out the mitral\n # valve tissue from the rest - all we need is the overlap of the two labels\n labels_tissue_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_tissue, radius=[2] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n labels_blood_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_blood, radius=[4] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n labels_valve = retrieve_overlap(labels_blood_no_holes, labels_tissue_no_holes)\n labels_valve_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_valve, radius=[2] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n labels_valve_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_valve_no_holes, radius=[1] * 3, majorityThreshold=0, backgroundValue=1, foregroundValue=0)\n\n # Fix intensity scaling on our original smoothed image for pretty diagram purposes\n img_smooth = sitk.Cast(sitk.RescaleIntensity(img_smooth), labels_tissue_no_holes.GetPixelID())\n\n # Use a density-based clustering algorithm to attempt to remove as much noise as possible\n labels_valve_filtered = dbscan_filter(labels_valve_no_holes, eps=2, use_z=False)\n labels_valve_filtered = dbscan_filter(labels_valve_filtered, eps=4)\n\n # Find likely start and end points of our image by setting a mininum number of labeled pixels\n start, end = filter_by_label_count(labels_valve_filtered, 10)\n img_smooth = img_smooth[:, :, start:end]\n labels_valve_filtered = labels_valve_filtered[:, :, start:end]\n\n # Remove all values distant from the center of our starting location by taking advantage of kmeans\n df = get_df_from_img(labels_valve_filtered[:, :, 0], dimensions=2)\n x_mid = df['x'].mean()\n y_mid = df['y'].mean()\n df = get_df_from_img(labels_valve_filtered)\n distance_df = df.drop('z', axis=1)\n distance_df['x_dist'] = abs(distance_df['x'] - x_mid)\n distance_df['y_dist'] = abs(distance_df['y'] - y_mid)\n fit = cluster.KMeans(n_clusters=2).fit(distance_df.drop(['x', 'y'], axis=1))\n labels = fit.labels_\n df['label'] = pd.Series(labels)\n counts = df['label'].value_counts().to_dict()\n largest_cluster = max(counts.iterkeys(), key=(lambda key: counts[key]))\n update_img_from_df(df, labels_valve_filtered, keep=largest_cluster)\n\n # Find likely start and end points of our image by setting a mininum number of labeled pixels\n start, end = filter_by_label_count(labels_valve_filtered, 10)\n img_smooth = img_smooth[:, :, start:end]\n labels_valve_filtered = labels_valve_filtered[:, :, start:end]\n\n # Use a segmentation-based clustering algorithm to attempt to find each valve\n label_segments, x_max = kmeans_segment(labels_valve_filtered, use_z=False)\n\n left, right = (label_segments[0], label_segments[1])\n if x_max[0] > x_max[1]:\n left, right = right, left\n\n # Finally, we can simply take the furthest point from the likely start/end points in order to get our annulus\n # this can be done by every z value\n left_points = {'x': [], 'y': [], 'z': []}\n right_points = {'x': [], 'y': [], 'z': []}\n zlen = len(sitk.GetArrayFromImage(left))\n for z in xrange(zlen):\n left_df = get_df_from_img(left[:, :, z], dimensions=2)\n if len(left_df['y']) > 0:\n index = left_df['y'].idxmin()\n row = left_df.iloc[index]\n left_points['x'].append(int(row['x']))\n left_points['y'].append(int(row['y']))\n left_points['z'].append(z)\n\n right_df = get_df_from_img(right[:, :, z], dimensions=2)\n if len(right_df['x']) > 0:\n index = right_df['x'].idxmax()\n row = right_df.iloc[index]\n 
right_points['x'].append(int(row['x']))\n right_points['y'].append(int(row['y']))\n right_points['z'].append(z)\n\n # These both represent the coordinates of our annulus ring. A simple spline can be used for interpolation between\n # points\n final_left = pd.DataFrame.from_dict(left_points)\n final_right = pd.DataFrame.from_dict(right_points)\n print('Coordinates for one side of the ring')\n print(final_left)\n print('\\n\\nCoordinates for the other side of the ring')\n print(final_right)\n\n final_image = make_empty_img_from_img(left)\n x = left_points['x'] + right_points['x']\n y = left_points['y'] + right_points['y']\n z = left_points['z'] + right_points['z']\n for x, y, z in zip(x, y, z):\n final_image.SetPixel(x, y, z, 1)\n\n show_all(img_smooth, final_image)", "def label(self, cfg):\n rep = \"\"\n nl = \"\"\n for node in cfg.nodes:\n rep += nl + \"{}\\tgen={}\\tkill={}\\tout={}\".format(\n node, \n set(self.gen.get(node)),\n set(self.kill.get(node)),\n set(self.out.get(node)))\n nl = \"\\n\"\n return rep", "def test_get_dim_label_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['label'] == 'Unemployment rate')", "def LabelDisks(self):\n pass", "def visualize_detection(self, img, dets, seg, classes=[], thresh=0.6):\n from dataset.cs_labels import labels\n lut = np.zeros((256,3))\n for l in labels:\n if l.trainId<255 and l.trainId>=0:\n lut[l.trainId,:]=list(l.color)\n palette = lut\n # det2seg = {0:6,1:7,2:11,3:12,4:13,5:14,6:15,7:16,8:17,9:18,}\n det2seg = {0:11,1:12,2:13,3:14,4:15,5:16,6:17,7:18,}\n \n import cv2\n import random\n tic = time.time()\n color_white = (255, 255, 255)\n im = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # change to bgr\n yscale, xscale, ch = im.shape\n color = (0,0,128)\n fontFace = cv2.FONT_HERSHEY_PLAIN\n fontScale = .8*(yscale/float(320))\n thickness = 2 if yscale>320 else 1\n idx = np.argsort(dets[:,6],axis=0)[::-1] ## draw nearest first !!\n dets = dets[idx,:]\n for det in dets:\n cls_id = int(det[0])\n bbox = [det[2]*xscale,det[3]*yscale,det[4]*xscale,det[5]*yscale]\n score = det[1]\n distance = det[-1]\n if score > thresh:\n bbox = map(int, bbox)\n color = palette[det2seg[int(det[0])],(2,1,0)]\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=thickness)\n text = '%s %.0fm' % (short_class_name[classes[cls_id]], distance*255., )\n textSize, baseLine = cv2.getTextSize(text, fontFace, fontScale, thickness=1)\n cv2.rectangle(im, (bbox[0], bbox[1]-textSize[1]), (bbox[0]+textSize[0], bbox[1]), color=(128,0,0), thickness=-1)\n cv2.putText(im, text, (bbox[0], bbox[1]),\n color=color_white, fontFace=fontFace, fontScale=fontScale, thickness=1)\n disp = im.copy()\n if False: #disp.shape[1]>1000:\n hh, ww, ch = disp.shape\n resized = cv2.resize(disp, (int(round(ww*.92)),int(round(hh*.92))))\n else:\n resized = disp\n cv2.imshow(\"result\", resized)\n # cv2.imwrite(\"data/cityscapes/Results/stuttgart_%06d.png\" % (self.imgidx,), resized)\n # self.imgidx += 1", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors 
that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def convert_kicad_coor(edif_pt):\n scale = 10\n return [edif_pt[0] * scale, +edif_pt[1] * scale]", "def create_dimension_labels(gll, parameters: list):\n dimstr = '[ ' + ' | '.join(parameters) + ' ]'\n gll['MODEL/data'].dims[0].label = 'element'\n gll['MODEL/data'].dims[1].label = dimstr\n gll['MODEL/data'].dims[2].label = 'point'", "def add_hdv(self, ROI_id, type_hdv='cum', checkbox_mode=False):\n\n appartenance_contourage = self.dicom_navigation.slice.get_appartenance_contourage(ROI_id)\n \n contourage = Contourage_from_matrice(appartenance_contourage, ROI_id) # On crée un objet 'Contourage_from_matrice' à partir du de la matrice booléenne\n\n dose_matrix = self.dicom_navigation.slice.get_dose_matrix()\n\n # Cas ou on ajoute pour la premiere fois un contourage\n if dose_matrix is None:\n return\n \n doses = Doses_from_matrice(dose_matrix) # On crée un objet 'Doses_from_matrice' à partir de la matrice de doses mise à jour\n\n var = tk.StringVar() # À VENIR... VARIABLE D'ÉTAT QUI INDIQUE SI ON EST EN MODE 'VOLUME RELATF' OU 'VOLUME ABSOLU'. CODÉ EN DUR POUR LE MOMENT\n var.set('r')\n\n self.ddc = Doses_dans_contourage(doses, contourage) # Triage des doses qui sont dans le contourage.\n\n if self.ddc.dose_max == 0: # Si la dose max est 0, on sait qu'on est à l'extérieur de la zone réduite. 
*** \n return\n\n if not ROI_id in self.dict_graph: \n self.dict_graph[ROI_id] = {} \n self.dict_plot[ROI_id] = {} \n self.dict_doses_max[ROI_id] = {} \n if self.dicom_navigation.var_etat_abs_rel.get() == 'a':\n self.dict_volumes_max[ROI_id] = {} \n\n self.dict_doses_max[ROI_id][type_hdv] = self.ddc.dose_max\n\n ###\n\n if self.dicom_navigation.var_etat_abs_rel.get() == 'r': # si on est en mode 'volume relatif', le range des axes sera définit différemment\n facteur = 100.0/self.ddc.nb_voxels # comme l'instance 'axe_volume' créée par les classes hdv_cumulatif et hdv_differentiel contient des données en NOMBRE DE VOXELS\n # (et non en pourcentage ou en volume réel), il faut multiplier ces données par le facteur de conversion approprié (il dépend\n # de si l'on est en mode 'relatf' ou 'absolu').\n\n if self.dicom_navigation.var_etat_abs_rel.get() == 'a': # si on est en mode 'volume absolu'.\n facteur = self.ddc.v_voxel\n self.dict_volumes_max[ROI_id][type_hdv] = self.ddc.v_voxel * self.ddc.nb_voxels \n self.y_lim = get_max_2D_dic(self.dict_volumes_max)\n\n ###\n\n if type_hdv == 'cum':\n hdv = HDV_cumulatif(self.ddc, 100)\n\n if type_hdv == 'diff':\n hdv = HDV_differentiel(self.ddc, 50)\n\n\n self.dict_graph[ROI_id][type_hdv] = hdv\n self.dict_plot[ROI_id][type_hdv], = self.fig.plot(hdv.axe_doses, facteur * hdv.axe_volume)\n\n ###\n\n self.x_lim = get_max_2D_dic(self.dict_doses_max) \n\n self.fig.set_xlim([0, 1.02*self.x_lim]) # dimension de l'axe des x\n self.fig.set_ylim([0, 1.02*self.y_lim]) # dimension de l'axe des y\n\n # Contraintes\n if self.got_contraintes and type_hdv == 'cum': # 'got_contraintes' SERA INITALISÉE À 'TRUE' LORSQUE L'ON AURA RÉCUPÉRÉ LE FICHIER DE CONTRAINTES\n self.dicom_navigation.get_dicom_contraintes().verifier_contraintes_sur_une_ROI(ROI_id)\n\n # Modifier\n if checkbox_mode:\n self.refresh_HDV()", "def instance_label(task, pred, k=15, n_iters=1, dist_thresh=5, watershed=False):\n mask = pred\n\n # noise removal\n if k > 1 and n_iters > 0:\n kernel = np.ones((k, k), np.uint8)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,\n iterations=n_iters)\n\n if watershed:\n from clab.live import filters\n mask = filters.watershed_filter(mask, dist_thresh=dist_thresh)\n\n mask = mask.astype(np.uint8)\n n_ccs, cc_labels = cv2.connectedComponents(mask, connectivity=4)\n return cc_labels", "def old_ideal_label(I):\n a, c, d = ideal_HNF(I)\n return \"%s.%s.%s\" % (a * d, c, d)", "def setContourLabelString(text=''):\n dislin.conlab(text)", "def label(cmd):\r\n cmd = cmd.replace('make][.DP', 'make1][.NP')\r\n cmd = cmd.replace('make][.SC', 'make2][.SC')\r\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\r\n cmd = '[result ' + cmd + ']' #dummy function for plop\r\n return cmd", "def label(gt_dataset, volume_dim, voxel_dim, labeling_params):\n labeled_volumes = dict()\n labeled_cells = dict()\n #Use global density and reduce the size of gt_dataset here\n global_density = labeling_params[\"global_density\"]\n gt_dataset = {k: v for k,v in gt_dataset.items() if random_sample() < global_density}\n #Label in the order specified in the configuration\n layers = sorted(labeling_params.keys())\n #Remove global_density\n layers.remove(\"global_density\")\n for layer in layers:\n print \"Labeling {}\".format(layer)\n fluorophore = labeling_params[layer]['fluorophore']\n volume, cells = 
brainbow(gt_dataset, volume_dim, voxel_dim, **labeling_params[layer])\n if fluorophore in labeled_volumes:\n labeled_volumes[fluorophore] += volume\n labeled_cells[fluorophore] |= cells\n else:\n labeled_volumes[fluorophore] = volume\n labeled_cells[fluorophore] = cells\n return labeled_volumes, labeled_cells", "def make_data_label(self):\n data_label = \"\"\n if self.detector is not None:\n data_label += \"%s \"%self.detector\n if self.selection is not None:\n data_label += \"%s Event Selection\"%self.selection\n if data_label == \"\":\n data_label = \"IceCube\"\n return data_label", "def write_label(self, contig_name, width, height, font, title_width, upper_left, vertical_label,\n strand, canvas, horizontal_centering=False, center_vertical=False, chop_text=True,\n label_color=(50, 50, 50, 255)):\n upper_left = list(upper_left) # to make it mutable\n shortened = contig_name[-title_width:] # max length 18. Last characters are most unique\n txt = Image.new('RGBA', (width, height))#, color=(0,0,0,50))\n txt_canvas = ImageDraw.Draw(txt)\n text_width = txt_canvas.textsize(shortened, font)[0]\n if not chop_text and text_width > width:\n txt = Image.new('RGBA', (text_width, height)) # TODO performance around txt_canvas\n txt_canvas = ImageDraw.Draw(txt)\n if center_vertical or vertical_label: # Large labels are centered in the column to look nice,\n # rotation indicates strand in big text\n vertically_centered = (height // 2) - multi_line_height(font, shortened, txt)//2\n else: # Place label at the beginning of gene based on strand\n vertically_centered = height - multi_line_height(font, shortened, txt) # bottom\n if strand == \"+\":\n vertically_centered = 0 # top of the box\n txt_canvas.multiline_text((0, max(0, vertically_centered)), shortened, font=font,\n fill=label_color)\n if vertical_label:\n rotation_direction = 90 if strand == '-' else -90\n txt = txt.rotate(rotation_direction, expand=True)\n upper_left[1] += -4 if strand == '-' else 4\n if horizontal_centering:\n margin = width - text_width\n upper_left[0] += margin // 2\n canvas.paste(txt, (upper_left[0], upper_left[1]), txt)", "def convert_medical_decathlon_labels(mask, cohort, keep_all_label=False):\n label = 12*[0]\n if keep_all_label:\n label += [0,0]\n \n if cohort == 'liver':\n mask[mask == 2] = 6\n mask[mask == 1] = 6\n label[6] = 1\n \n elif cohort == 'pancreas':\n mask[mask == 2] = 11\n mask[mask == 1] = 11\n label[11] = 1\n\n elif cohort == 'spleen':\n mask[mask != 1] = 0\n label[1] = 1 \n\n elif cohort == 'hepatic':\n mask[mask == 2] = 0\n mask[mask == 1] = 0\n \n return mask, label", "def get_labels(self):\n if self.option == \"term\":\n return ['platform characteristics', 'atmospheric winds', 'radio wave','weather events', 'geomagnetism', 'atmospheric electricity','microwave', 'atmospheric temperature', 'atmospheric water vapor','atmospheric pressure', 'aerosols', 'atmospheric radiation','atmospheric chemistry', 'precipitation', 'sensor characteristics','radar', 'infrared wavelengths', 'visible wavelengths','weather/climate advisories', 'clouds', 'lidar', 'ocean optics','ultraviolet wavelengths', 'cryospheric indicators','land use/land cover', 'topography', 'surface thermal properties','spectral/engineering', 'soils', 'snow/ice', 'geothermal dynamics','natural hazards', 'surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal 
processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics']\n elif self.option == \"mostdepth\":\n return ['flight data logs', 'turbulence', 'radio wave flux', 'lightning', 'magnetic field', 'atmospheric conductivity', 'electric field', 'data synchronization time', 'brightness temperature', 'vertical profiles', 'water vapor profiles', 'air temperature', 'upper level winds', 'atmospheric pressure measurements', 'upper air temperature', 'humidity', 'dew point temperature', 'aerosol particle properties', 'emissivity', 'trace gases/trace species', 'liquid precipitation', 'cloud liquid water/ice', 'microwave radiance', 'sensor counts', 'total pressure', 'airspeed/ground speed', 'total temperature', 'static pressure', 'wind speed', 'wind direction', 'radar reflectivity', 'doppler velocity', 'infrared imagery', 'visible imagery', 'water vapor', 'vertical wind velocity/speed', 'aerosol backscatter', 'weather forecast', 'tropical cyclones', 'visible radiance', 'infrared radiance', 'total precipitable water', 'boundary layer temperature', 'atmospheric temperature indices', 'cloud height', 'flight level winds', 'cloud droplet distribution', 'cloud droplet concentration/size', 'cloud condensation nuclei', 'cloud microphysics', 'hydrometeors', 'ozone', 'wind profiles', 'cloud base temperature', 'cloud base height', 'liquid water equivalent', 'solar radiation', 'planetary boundary layer height', 'surface winds', 'precipitation amount', 'precipitation rate', 'surface pressure', 'rain', 'cloud optical depth/thickness', 'aerosol extinction', 'aerosol optical depth/thickness', 'cirrus cloud systems', 'lidar depolarization ratio', 'radar backscatter', 'radar cross-section', 'return power', 'mean radial velocity', 'radiance', 'air quality', 'climate advisories', 'atmospheric emitted radiation', 'optical depth/thickness', 'surface temperature', 'ultraviolet flux', 'spectrum width', 'microwave imagery', 'lidar backscatter', 'relative humidity', 'u/v wind components', 'wind speed/wind direction', 'radar imagery', 'snow depth', 'land use/land cover classification', 'digital elevation/terrain model (dem)', 'snow', 'droplet size', 'droplet concentration/size', 'drizzle', 'precipitation anomalies', 'snow water equivalent', 'solid precipitation', 'total surface precipitation 
rate', 'particle size distribution', 'skin temperature', 'attitude characteristics', 'land surface temperature', 'hail', 'reflectance', 'soil moisture/water content', 'soil temperature', 'soil bulk density', 'surface roughness', 'present weather', 'snow density', 'ambient temperature', 'aerosol forward scatter', 'floods', 'snow cover', 'sigma naught', 'precipitable water', 'stage height', 'rivers/streams', 'shortwave radiation', 'photosynthetically active radiation', 'longwave radiation', 'net radiation', 'hourly precipitation amount', '24 hour precipitation amount', 'soil moisture', 'satellite orbits/revolution', 'sea surface temperature', 'heat flux', 'latent heat flux', 'cloud fraction', '3 and 6 hour precipitation amount', 'geopotential height', 'particulate matter', 'particle images', 'water vapor indices', 'horizontal wind velocity/speed', 'electrical conductivity', 'dissolved carbon dioxide', 'hurricanes', 'tropical cyclone track', 'convective clouds/systems (observed/analyzed)', 'cloud top height', 'viewing geometry', 'temperature profiles', 'vertical wind shear', 'wind shear', 'carbon monoxide', 'sea level pressure', 'water vapor tendency', 'potential temperature', 'angstrom exponent', 'ultraviolet radiation', 'solar irradiance', 'scattering', 'absorption', 'water vapor mixing ratio profiles', 'sea surface temperature indices', 'extreme eastern tropical pacific sst', 'sedimentation', 'erosion', 'sediment transport', 'sediments', 'tropopause', 'ocean chemistry', 'ocean optics', 'ocean temperature', 'salinity/density', 'pigments', 'ocean color', 'attenuation/transmission', 'inorganic carbon', 'organic carbon', 'photosynthetically available radiation', 'chlorophyll', 'optical depth', 'fluorescence', 'vegetation index', 'gelbstoff', 'phytoplankton', 'vegetation index2', 'cloud precipitable water', 'landscape ecology', 'ultraviolet radiance', 'cloud ceiling', 'aerosol radiance', 'carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles', 'organic particles', 'sulfate particles', 'radiative flux', 'transmittance', 'atmospheric stability', 'cloud asymmetry', 'cloud frequency', 'cloud top pressure', 'cloud top temperature', 'cloud vertical distribution', 'cloud emissivity', 'cloud radiative forcing', 'cloud reflectance', 'rain storms', 'reflected infrared', 'thermal infrared', 'incoming solar radiation', 'clouds', 'cloud properties', 'cloud types', 'orbital characteristics', 'sensor characteristics', 'maximum/minimum temperature', 'condensation', 'platform characteristics', 'geolocation', 'geodetics', 'coordinate reference system', 'aerosols', 'topographical relief maps', 'terrain elevation', 'normalized difference vegetation index (ndvi)', 'infrared flux', 'visible flux', 'albedo', 'land use/land cover', 'topography', 'lidar', 'lidar waveform', 'plant phenology', 'vegetation cover', 'crop/plant yields', 'land use classes', 'landscape patterns', 'forest harvesting and engineering', 'forest management', 'total surface water', 'agricultural plant science', 'photosynthesis', 'primary production', 'leaf characteristics', 'evapotranspiration', 'fire occurrence', 'surface thermal properties', 'canopy characteristics', 'evergreen vegetation', 'crown', 'deciduous vegetation', 'anisotropy', 'fire ecology', 'biomass burning', 'wildfires', 'topographical relief', 'burned area', 'surface radiative properties', 'environmental sustainability', 'boundaries', 'anthropogenic/human influenced ecosystems', 'emissions', 'sulfur dioxide', 'population', 'infrastructure', 'environmental assessments', 
'public health', 'conservation', 'agriculture production', 'administrative divisions', 'economic resources', 'socioeconomics', 'lake/pond', 'rivers/stream', 'political divisions', 'environmental vulnerability index (evi)', 'ecosystems', 'urban areas', 'sustainability', 'treaty agreements/results', 'human settlements', 'population estimates', 'nitrogen dioxide', 'cropland', 'pasture', 'particulates', 'cyclones', 'mortality', 'environmental impacts', 'droughts', 'earthquakes', 'population distribution', 'fertilizers', 'animal manure and waste', 'urbanization/urban sprawl', 'landslides', 'avalanche', 'urban lands', 'mangroves', 'volcanic eruptions', 'pesticides', 'population size', 'population density', 'lakes/reservoirs', 'surface water', 'rural areas', 'infant mortality rates', 'amphibians', 'mammals', 'carbon', 'sulfur oxides', 'methane', 'non-methane hydrocarbons/volatile organic compounds', 'nitrogen oxides', 'natural gas', 'coal', 'coastal elevation', 'biodiversity functions', 'nuclear radiation exposure', 'radiation exposure', 'poverty levels', 'malnutrition', 'wetlands', 'sea level rise', 'vulnerability levels/index', 'ground water', 'snow/ice', 'electricity', 'energy production/use', 'sustainable development', 'deforestation', 'household income', 'discharge/flow', 'hydropattern', 'nitrogen', 'phosphorus', 'carbon dioxide', 'alpine/tundra', 'forests', 'vegetation', 'permafrost', 'nutrients', 'plant characteristics', 'leaf area index (lai)', 'soil gas/air', 'ammonia', 'nitrous oxide', 'ecosystem functions', 'litter characteristics', 'soil chemistry', 'soil respiration', 'active layer', 'soil depth', 'cation exchange capacity', 'organic matter', 'soil porosity', 'soil texture', 'permafrost melt', 'land subsidence', 'freeze/thaw', 'surface water features', 'chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride', 'molecular hydrogen', 'sulfur compounds', 'fire models', 'biomass', 'dominant species', 'vegetation species', 'sulfur', 'tree rings', 'soil classification', 'heat index', 'sea ice concentration', 'ocean heat budget', 'reforestation', 'even-toed ungulates', 'species recruitment', 'population dynamics', 'range changes', 'topographic effects', 'land resources', 'river ice depth/extent', 'snow melt', 'river ice', 'animal commodities', 'animal ecology and behavior', 'phenological changes', 'water depth', 'inundation', 'forest fire science', 'biogeochemical cycles', 'radiative forcing', 'soil heat budget', 'drainage', 'respiration rate', 'river/lake ice breakup', 'river/lake ice freeze', 'reclamation/revegetation/restoration', 'permafrost temperature', 'indigenous/native species', 'fire dynamics', 'lichens', 'plants', 'plant succession', 'carbon flux', 'coastal', 'salt marsh', 'degradation', 'altitude', 'carbon and hydrocarbon compounds', 'halocarbons and halogens', 'forest composition/vegetation structure', 'water vapor indicators', 'barometric altitude', 'atmospheric water vapor', 'terrestrial ecosystems', 'volatile organic compounds', 'boundary layer winds', 'forest fire danger index', 'periglacial processes', 'landscape processes', 'evaporation', 'soil horizons/profile', 'shrubland/scrub', 'soil ph', 'soils', 'soil water holding capacity', 'community structure', 'pingo', 'soil color', 'virtual temperature', 'formaldehyde', 'hydroxyl', 'photolysis rates', 'cloud dynamics', 'nitric oxide', 'molecular oxygen', 'smog', 'peroxyacyl nitrate', 'hydrogen compounds', 'nitrogen compounds', 'oxygen compounds', 'stable isotopes', 'chemical composition', 'actinic flux', 'tropospheric 
ozone', 'fossil fuel burning', 'industrial emissions', 'denitrification rate', 'sunshine', 'runoff', 'soil structure', 'mosses/hornworts/liverworts', 'peatlands', 'hydraulic conductivity', 'snow/ice temperature', 'vegetation water content', 'discharge', 'chlorophyll concentrations', 'outgoing longwave radiation', 'geomorphic landforms/processes', 'soil compaction', 'soil impedance', 'canopy transmittance', 'water table', 'decomposition', 'water temperature', 'dissolved gases', 'total dissolved solids', 'agricultural expansion', 'forest science', 'pressure tendency', 'visibility', 'biomass dynamics', 'agricultural lands', 'grasslands', 'savannas', 'grazing dynamics/plant herbivory', 'herbivory', 'paleoclimate reconstructions', 'drought indices', 'fire weather index', 'animal yields', 'multivariate enso index', 'dissolved solids', 'ocean currents', 'salinity', 'coastal processes', 'atmospheric pressure', 'afforestation/reforestation', 'fresh water river discharge', 'surface water chemistry', 'drainage basins', 'resource development site', 'dunes', 'flood plain', 'endangered species', 'precipitation indices', 'temperature indices', 'forest yields', 'stratigraphic sequence', 'freeze/frost', 'frost', 'hydrogen cyanide', 'land management', 'nutrient cycling', 'industrialization', 'suspended solids', 'deserts', 'weathering', 'gas flaring', 'atmospheric temperature', 'ice extent', 'fraction of absorbed photosynthetically active radiation (fapar)', 'marshes', 'swamps', 'lake ice', 'atmospheric winds', 'watershed characteristics', 'transportation', 'soil rooting depth', 'isotopes', 'cultural features', 'consumer behavior', 'boundary surveys', 'aquifers', 'land productivity', 'water quality/water chemistry', 'sediment composition', 'dissolved oxygen', 'surface water processes/measurements', 'turbidity', 'conductivity', 'ph', 'calcium', 'magnesium', 'potassium', 'micronutrients/trace elements', 'social behavior', 'sulfate', 'sediment chemistry', 'biogeochemical processes', 'water ion concentrations', 'cropping systems', 'percolation', 'groundwater chemistry', 'reforestation/revegetation', 'species/population interactions', 'soil infiltration', 'alkalinity', 'soil fertility', 'phosphorous compounds', 'radioisotopes', 'cooling degree days', 'angiosperms (flowering plants)', 'glacial landforms', 'glacial processes', 'contour maps', 'estuaries', 'methane production/use', 'natural gas production/use', 'petroleum production/use', 'visualization/image processing', 'subsetting/supersetting', 'transformation/conversion', 'forest mensuration', 'acid deposition', 'differential pressure', 'precipitation', 'marine ecosystems', 'consumption rates', 'radio wave', 'soil organic carbon (soc)', 'soil erosion', 'halocarbons', 'trace elements/trace metals', 'biomass energy production/use', 'riparian wetlands', 'soil consistence', 'snow stratigraphy', 'thermal conductivity', 'estuary', 'tidal height', 'plant diseases/disorders/pests', 'layered precipitable water', 'atmospheric chemistry', 'water vapor concentration profiles', 'specific humidity', 'total runoff', 'pressure thickness', 'wind stress', 'atmospheric heating', 'conduction', 'hydrogen chloride', 'nitric acid', 'radar', 'land surface/agriculture indicators', 'satellite soil moisture index', 'chlorine nitrate', 'chlorofluorocarbons', 'dinitrogen pentoxide', 'antenna temperature', 'glaciers', 'ice sheets', 'dimethyl sulfide', 'potential vorticity', 'ice fraction', 'atmospheric radiation', 'runoff rate', 'temperature tendency', 'wind dynamics', 'wind direction 
tendency', 'base flow', 'bromine monoxide', 'chlorine monoxide', 'methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy', 'cloud base pressure', 'temperature anomalies', 'nitrate', 'ocean mixed layer', 'precipitation trends', 'temperature trends', 'convection', 'ground ice', 'oxygen', 'phosphate', 'solar induced fluorescence', 'chlorine dioxide', 'sun-earth interactions', 'uv aerosol index', 'volcanic activity', 'potential evapotranspiration', 'ultraviolet wavelengths', 'ice temperature', 'sea surface skin temperature', 'sea surface height', 'sublimation', 'convective surface precipitation rate', 'hydrogen fluoride', 'airglow', 'energy deposition', 'x-ray flux', 'electron flux', 'proton flux', 'magnetic fields/magnetic currents']\n else:\n return ['platform characteristics', 'atmospheric winds','radio wave', 'weather events', 'geomagnetism','atmospheric electricity', 'microwave', 'atmospheric temperature','atmospheric water vapor', 'atmospheric pressure', 'aerosols','atmospheric radiation', 'atmospheric chemistry', 'precipitation','sensor characteristics', 'radar', 'infrared wavelengths','visible wavelengths', 'weather/climate advisories', 'clouds','lidar', 'ocean optics', 'ultraviolet wavelengths','cryospheric indicators', 'land use/land cover', 'topography','surface thermal properties', 'spectral/engineering', 'soils','snow/ice', 'geothermal dynamics', 'natural hazards','surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics','flight data logs','wind dynamics', 'radio wave flux', 'lightning', 'magnetic field','atmospheric conductivity', 'electric field','data synchronization time', 'brightness temperature','upper air temperature', 'water vapor profiles','surface temperature', 'upper level winds','atmospheric pressure measurements', 'water vapor indicators','aerosol particle properties', 'emissivity','trace gases/trace species', 'liquid precipitation','cloud microphysics', 'microwave radiance', 'sensor counts','total pressure', 'airspeed/ground speed', 'total temperature','static pressure', 
'humidity', 'radar reflectivity','doppler velocity', 'infrared imagery', 'visible imagery','aerosol backscatter', 'weather forecast', 'tropical cyclones','visible radiance', 'infrared radiance','atmospheric temperature indices', 'cloud droplet distribution','cloud condensation nuclei', 'hydrometeors', 'oxygen compounds','wind profiles', 'liquid water equivalent', 'solar radiation','planetary boundary layer height', 'surface winds','precipitation amount', 'precipitation rate', 'surface pressure','aerosol extinction', 'aerosol optical depth/thickness','tropospheric/high-level clouds (observed/analyzed)','lidar depolarization ratio', 'radar backscatter','radar cross-section', 'return power', 'radial velocity','radiance', 'climate advisories', 'atmospheric emitted radiation','optical depth/thickness', 'ultraviolet flux', 'spectrum width','microwave imagery', 'lidar backscatter', 'radar imagery','snow depth', 'land use/land cover classification','terrain elevation', 'solid precipitation', 'droplet size','droplet concentration/size', 'precipitation anomalies','snow water equivalent', 'total surface precipitation rate','skin temperature', 'water vapor', 'attitude characteristics','land surface temperature', 'reflectance','soil moisture/water content', 'soil temperature','soil bulk density', 'surface roughness', 'present weather','snow density', 'geothermal temperature','aerosol forward scatter', 'floods', 'snow cover', 'sigma naught','precipitable water', 'surface water processes/measurements','surface water features', 'shortwave radiation','photosynthetically active radiation', 'longwave radiation','net radiation', 'flight level winds', 'soil moisture','satellite orbits/revolution', 'heat flux','precipitation profiles', 'geopotential height','particulate matter', 'particle images', 'water vapor indices','electrical conductivity', 'gases', 'sea surface temperature','convective clouds/systems (observed/analyzed)','viewing geometry', 'wind shear','carbon and hydrocarbon compounds', 'sea level pressure','water vapor processes', 'ultraviolet radiation','solar irradiance', 'scattering', 'absorption','sea surface temperature indices', 'sedimentation', 'erosion','sediment transport', 'sediments', 'tropopause', 'nan', 'pigments','attenuation/transmission', 'inorganic carbon', 'organic carbon','photosynthetically available radiation', 'chlorophyll','optical depth', 'fluorescence', 'vegetation index', 'gelbstoff','plankton', 'vegetation index2', 'landscape ecology','ultraviolet radiance', 'aerosol radiance','carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles','organic particles', 'sulfate particles', 'radiative flux','transmittance', 'atmospheric stability','cloud radiative transfer', 'rain storms', 'reflected infrared','thermal infrared', 'incoming solar radiation', 'cloud types','orbital characteristics', 'geolocation','coordinate reference system', 'infrared flux', 'visible flux','albedo', 'lidar waveform', 'plant phenology', 'vegetation cover','crop/plant yields', 'land use classes', 'landscape patterns','forest harvesting and engineering', 'forest management','ecosystem functions', 'leaf characteristics', 'fire ecology','total surface water', 'primary production', 'photosynthesis','canopy characteristics', 'evergreen vegetation', 'crown','deciduous vegetation', 'anisotropy', 'biomass burning','wildfires', 'topographical relief','environmental sustainability','anthropogenic/human influenced ecosystems', 'emissions','sulfur compounds', 'environmental assessments', 'conservation','agriculture 
production', 'administrative divisions','freshwater ecosystems', 'political divisions', 'urban areas','treaty agreements/results', 'population estimates','nitrogen compounds', 'particulates', 'mortality', 'droughts','earthquakes', 'population distribution', 'fertilizers','animal manure and waste', 'urbanization/urban sprawl','landslides', 'avalanche', 'mangroves', 'volcanic eruptions','pesticides', 'population size', 'population density','rural areas', 'amphibians', 'mammals', 'carbon', 'sulfur oxides','land management', 'natural gas', 'sedimentary rocks','coastal elevation', 'community dynamics','nuclear radiation exposure', 'radiation exposure','poverty levels', 'malnutrition', 'sea level rise','vulnerability levels/index', 'electricity','energy production/use', 'sustainable development','deforestation', 'household income', 'nitrogen', 'phosphorus','terrestrial ecosystems', 'permafrost', 'nutrients','plant characteristics', 'soil gas/air', 'litter characteristics','soil chemistry', 'soil respiration', 'active layer', 'soil depth','cation exchange capacity', 'organic matter', 'soil porosity','soil texture', 'permafrost melt','ground water processes/measurements', 'freeze/thaw','halocarbons and halogens', 'hydrogen compounds', 'biomass','dominant species', 'vegetation species', 'sulfur', 'tree rings','soil classification', 'sea ice concentration', 'reforestation','species/population interactions', 'range changes','topographic effects', 'land resources', 'river ice depth/extent','snow melt', 'river ice', 'animal ecology and behavior','phenological changes', 'forest fire science', 'radiative forcing','soil heat budget', 'river/lake ice breakup','river/lake ice freeze', 'reclamation/revegetation/restoration','lichens', 'marine ecosystems', 'coastal landforms', 'degradation','forest composition/vegetation structure', 'barometric altitude','volatile organic compounds', 'forest fire danger index','periglacial processes', 'landscape processes','soil horizons/profile', 'soil ph', 'soil water holding capacity','fluvial landforms', 'soil color', 'glacial processes','photochemistry', 'cloud dynamics', 'nitrogen oxides', 'smog','chemical composition', 'actinic flux', 'tropospheric ozone','fossil fuel burning', 'industrial emissions','denitrification rate', 'sunshine', 'soil structure','mosses/hornworts/liverworts', 'hydraulic conductivity','snow/ice temperature', 'water characteristics','outgoing longwave radiation', 'soil compaction', 'soil impedance','canopy transmittance', 'ground water features', 'solids','agricultural expansion', 'pressure tendency', 'visibility','herbivory', 'paleoclimate reconstructions', 'drought indices','fire weather index', 'animal yields', 'teleconnections','carbon dioxide', 'dissolved solids', 'ocean currents', 'salinity','afforestation/reforestation', 'fresh water river discharge','surface water chemistry', 'aeolian landforms','precipitation indices', 'temperature indices', 'forest yields','stratigraphic sequence', 'freeze/frost', 'frost','industrialization', 'ice core records', 'suspended solids','weathering', 'gas flaring', 'ice extent', 'biogeochemical cycles','lake ice', 'isotopes', 'watershed characteristics','transportation', 'soil rooting depth', 'geochemical properties','carbon monoxide', 'cultural features', 'consumer behavior','boundary surveys', 'land productivity', 'sediment composition','calcium', 'magnesium', 'potassium','micronutrients/trace elements', 'sediment chemistry','biogeochemical processes', 'cropping systems','groundwater chemistry', 
'reforestation/revegetation','soil infiltration', 'soil fertility','angiosperms (flowering plants)', 'glacial landforms','forest mensuration', 'acid deposition', 'differential pressure','soil erosion', 'trace elements/trace metals', 'soil consistence','snow stratigraphy', 'thermal conductivity', 'estuaries','tidal height', 'plant diseases/disorders/pests','pressure thickness', 'atmospheric heating', 'conduction','evaporation', 'turbulence', 'wind stress','satellite soil moisture index', 'antenna temperature', 'glaciers','ice sheets', 'nitrate', 'ocean mixed layer','precipitation indicators', 'temperature indicators', 'ground ice','alkalinity', 'dissolved gases', 'oxygen', 'ph', 'phosphate','solar induced fluorescence', 'volcanic activity','ice temperature', 'sea surface height', 'airglow','energy deposition', 'x-ray flux', 'electron flux', 'proton flux','magnetic fields/magnetic currents', 'vertical profiles','air temperature', 'dew point temperature','cloud liquid water/ice', 'wind speed', 'wind direction','vertical wind velocity/speed', 'total precipitable water','boundary layer temperature', 'cloud height','cloud droplet concentration/size', 'ozone','cloud base temperature', 'cloud base height', 'rain','cloud optical depth/thickness', 'cirrus/systems','mean radial velocity', 'relative humidity', 'u/v wind components','wind speed/wind direction','digital elevation/terrain model (dem)', 'snow', 'drizzle','particle size distribution', 'hail', 'ambient temperature','stage height', 'rivers/streams', 'hourly precipitation amount','24 hour precipitation amount', 'latent heat flux','cloud fraction', '3 and 6 hour precipitation amount','horizontal wind velocity/speed', 'dissolved carbon dioxide','hurricanes', 'tropical cyclone track', 'cloud top height','temperature profiles', 'vertical wind shear','water vapor tendency', 'potential temperature','angstrom exponent', 'water vapor mixing ratio profiles','extreme eastern tropical pacific sst', 'phytoplankton','cloud precipitable water', 'cloud asymmetry', 'cloud ceiling','cloud frequency', 'cloud top pressure', 'cloud top temperature','cloud vertical distribution', 'cloud emissivity','cloud radiative forcing', 'cloud reflectance','maximum/minimum temperature', 'condensation','topographical relief maps', 'evapotranspiration','fire occurrence', 'burned area', 'sulfur dioxide', 'lake/pond','rivers/stream', 'nitrogen dioxide', 'agricultural lands','cyclones', 'urban lands', 'lakes/reservoirs','infant mortality rates', 'methane','non-methane hydrocarbons/volatile organic compounds', 'coal','biodiversity functions', 'wetlands', 'discharge/flow','hydropattern', 'alpine/tundra', 'forests','leaf area index (lai)', 'ammonia', 'nitrous oxide','land subsidence', 'normalized difference vegetation index (ndvi)','chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride','molecular hydrogen', 'fire models', 'heat index','even-toed ungulates', 'species recruitment','population dynamics', 'water depth', 'inundation', 'drainage','respiration rate', 'permafrost temperature','indigenous/native species', 'fire dynamics', 'plant succession','coastal', 'salt marsh', 'boundary layer winds', 'shrubland/scrub','community structure', 'pingo', 'virtual temperature','formaldehyde', 'hydroxyl', 'photolysis rates', 'nitric oxide','molecular oxygen', 'peroxyacyl nitrate', 'stable isotopes','runoff', 'vegetation water content', 'discharge','chlorophyll concentrations', 'water table', 'decomposition','water temperature', 'total dissolved solids', 'biomass 
dynamics','grasslands', 'savannas', 'grazing dynamics/plant herbivory','multivariate enso index', 'drainage basins','resource development site', 'dunes', 'flood plain','endangered species', 'hydrogen cyanide', 'nutrient cycling','deserts','fraction of absorbed photosynthetically active radiation (fapar)','aquifers', 'dissolved oxygen', 'turbidity', 'conductivity','sulfate', 'water ion concentrations', 'percolation','phosphorous compounds', 'radioisotopes', 'cooling degree days','contour maps', 'methane production/use','natural gas production/use', 'petroleum production/use','consumption rates', 'soil organic carbon (soc)', 'halocarbons','biomass energy production/use', 'estuary','layered precipitable water', 'water vapor concentration profiles','hydrogen chloride', 'nitric acid', 'chlorine nitrate','chlorofluorocarbons', 'dinitrogen pentoxide', 'dimethyl sulfide','vorticity', 'ice fraction', 'temperature tendency','wind direction tendency', 'bromine monoxide', 'chlorine monoxide','methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy','cloud base pressure', 'temperature anomalies','precipitation trends', 'temperature trends', 'convection','chlorine dioxide', 'uv aerosol index','sea surface skin temperature', 'sublimation','convective surface precipitation rate', 'hydrogen fluoride']", "def setContourLabelDistance(distance=500):\n dislin.labdis(distance, 'CONTUR')" ]
[ "0.5650385", "0.54846936", "0.54270923", "0.5375306", "0.53342646", "0.5226105", "0.5209129", "0.51919734", "0.51784825", "0.5173263", "0.5163355", "0.51560795", "0.5154937", "0.51164854", "0.5109441", "0.5093374", "0.50911057", "0.5076122", "0.50540954", "0.50480634", "0.50421095", "0.5034301", "0.5032512", "0.5028899", "0.50236046", "0.50234383", "0.50142574", "0.50132155", "0.500397", "0.49963433" ]
0.6397082
0
ht label. ht cuts, and cuts on participating jets
def _make_ht_label(chain_parts):
    assert len(chain_parts) == 1, '_make_ht_label, no. of chain parts != 1'
    scenario = chain_parts[0]['hypoScenario']
    assert scenario.startswith('HT'), '_make_ht_label(): scenario does not start with HT'

    arg_res = [
        re.compile(r'^(?P<lo>\d*)(?P<key>ht)(?P<hi>\d*)$'),
        re.compile(r'^(?P<lo>\d*)(?P<key>et)(?P<hi>\d*)$'),
        re.compile(r'^(?P<lo>\d*)(?P<key>eta)(?P<hi>\d*)$'),
    ]

    defaults = {
        'ht': ('0', 'inf'),
        'et': ('0', 'inf'),
        'eta': ('0', 'inf'),
    }

    args = _args_from_scenario(scenario)
    argvals = {}
    nargs = len(args)
    assert len(args) <= len(arg_res), 'bad num of args %d, expected < %d' % (len(args), len(arg_res))

    # obtain argument values from scenario
    while args:
        arg = args.pop()
        for r in arg_res:
            m = r.match(arg)
            if m is not None:
                arg_res.remove(r)
                gd = m.groupdict()
                key = gd['key']

                try:
                    lo = float(gd['lo'])
                except ValueError:
                    lo = float(defaults[key][0])
                argvals[key+'lo'] = lo

                try:
                    hi = float(gd['hi'])
                except ValueError:
                    hi = float(defaults[key][1])
                argvals[key+'hi'] = hi

    print (argvals)
    assert len(argvals) == 2*nargs, 'no of args: %d, expected %d' % (len(argvals), 2*nargs)

    print ('sent 100')
    result = """
    ht([(%(htlo).0fht)
    (%(etlo).0fet)
    (%(etalo).0feta%(etahi).0f)
    ])""" % argvals
    print (result)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_thin(self, orig_label):\n pil_thin = thin(orig_label)\n # Keep the original label and set non-thinning label as 0\n orig_label[~pil_thin] = 0\n\n return orig_label", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def onCut(self):\n pass", "def odemis_to_hyperspy(filename='sampledata/cltest.h5',specbin=1) :\r\n\r\n f=h5.File(filename,'r')\r\n shome = 'Acquisition2//ImageData/'\r\n x = f[shome + 'Image']\r\n cdesc =f['Acquisition2/PhysicalData/ChannelDescription'].value[0].decode('utf-8')\r\n #print(cdesc)\r\n\r\n cltype = None\r\n if 'Spectrum' in cdesc :\r\n cltype = 'spectrum'\r\n elif 'CL intensity' in cdesc:\r\n cltype = 'panchrom'\r\n\r\n print('<' + filename + '> original shape :' ,x.shape, cltype)\r\n\r\n # strip unused dimensions and transpose/ reverse index order\r\n if cltype == 'panchrom' :\r\n xx=x[0,0,0,:,:].transpose((1,0))\r\n # just an image..\r\n else :\r\n xx=x[:,0,0,:,:].transpose((2,1,0))\r\n\r\n if cltype == 'spectrum' :\r\n #interpolate data to linearize the wavelength scale\r\n w = f[shome + 'DimensionScaleC'].value *1e9\r\n wx = np.linspace(w.min(),w.max(),w.size)\r\n for i in np.arange(xx.shape[0]) :\r\n for k in np.arange(xx.shape[1]) :\r\n xx[i,k,:] = np.interp(wx,w,xx[i,k,:])\r\n\r\n wslope = wx[1]-wx[0]\r\n woffset = wx.min()\r\n #wx = np.arange(w.size)\r\n #wslope,woffset=np.polyfit(wx,w,1)\r\n s = hs.signals.Signal1D(xx)\r\n\r\n elif cltype == 'panchrom' :\r\n s = hs.signals.Signal2D(xx)\r\n else :\r\n print('unknown type')\r\n\r\n print('hyperspy shape :' ,s.data.shape)\r\n\r\n\r\n s.metadata.General.title = 'Odemis: ' + cdesc\r\n s.metadata.General.original_filename = filename\r\n s.metadata.General.notes = cltype\r\n s.axes_manager[0].name = 'pos x'\r\n s.axes_manager[0].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[0].offset = f[shome + 'XOffset'].value * 1e6\r\n s.axes_manager[0].units = 'um'\r\n\r\n\r\n s.axes_manager[1].name = 'pos y'\r\n s.axes_manager[1].scale = 
f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[1].offset = f[shome + 'YOffset'].value * 1e6\r\n s.axes_manager[1].units = 'um'\r\n\r\n if cltype == 'spectrum' :\r\n s.axes_manager[2].name = 'wavelength'\r\n s.axes_manager[2].units = 'nm'\r\n s.axes_manager[2].offset = woffset\r\n s.axes_manager[2].scale = wslope\r\n s.metadata.signal_type = 'CL'\r\n\r\n f.close()\r\n if (specbin > 1) and (cltype == 'spectrum'):\r\n return( s.rebin(scale=[1,1,specbin]) )\r\n else :\r\n return( s )\r\n #end odemis_to_hyperspy\r\n #######################\r", "def cut(S, T, graph):\n ###TODO\n pass", "def __init__(self, label=None):\n super().__init__(\"h\", 1, [], label=label)", "def extract_info(config, cut, label):\n cfg = filter(lambda c: c['name'] == cut, config['physics']['cuts'])[0]\n text = \"\"\n if 'max' not in cfg:\n text += \"#geq \"\n text += str(cfg['min'])\n if 'max' in cfg and cfg['max'] != cfg['min']:\n text += '-' + str(cfg['max']) + ' ' + label + 's'\n elif cfg['min'] != 1:\n text += ' ' + label + 's'\n else:\n text += ' ' + label\n return text", "def ch(h1):\n return -(pic_height / float(h)) * h1", "def drawlabels(t, t1):\r\n t.fd(250)\r\n t.pd()\r\n t.write(\"Life\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(12)\r\n t.pd()\r\n t.write(\"Exp.\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(238)\r\n t.right(90)\r\n t.fd(80)\r\n t1.pu()\r\n t1.back(50)\r\n t1.rt(90)\r\n t1.fd(250)\r\n t1.pd()\r\n t1.write(\"Year\", font=(\"Arial\", 10, \"bold\"))\r\n t1.pu()\r\n t1.back(250)\r\n t1.left(90)\r\n t1.fd(50)", "def hxlcut():\n run_script(hxlcut_main)", "def write_label_ps(header_lines, base_lines, tail_lines, shape_list, title, outFn, cutofflist=[0.3,0.5,0.7], mode='fill'):\n OUT = open(outFn, \"w\")\n for header_line in header_lines:\n if r'{title}' in header_line:\n header_line = header_line.format(title=title)\n OUT.writelines(header_line)\n #print(len(shape_list), len())\n for shape,base_line in zip(shape_list,base_lines):\n if mode=='label':\n OUT.writelines( _color_command_segmented(shape, cutofflist)+\"\\n\" )\n elif mode=='heatmap':\n OUT.writelines( _color_command_heatmap(shape, Gradient_Colors, 0, 1)+\"\\n\" )\n else:\n raise RuntimeError(\"Sorry: mode='fill' Not applicant now\")\n OUT.writelines(base_line)\n for tail_line in tail_lines:\n OUT.writelines(tail_line)\n OUT.close()", "def __get_ohe_label__(self, label_idx) -> List[int]:\n\n label = [0] * self.n_classes\n label[label_idx] = 1\n\n return label", "def agglo_from_labelmask(\n h5path_in,\n h5path_lv='',\n ratio_threshold=0,\n h5path_out='',\n save_steps=False,\n protective=False,\n ):\n\n # check output paths\n outpaths = {'out': h5path_out}\n status = utils.output_check(outpaths, save_steps, protective)\n if status == \"CANCELLED\":\n return\n\n # open data for reading\n h5file_in, ds_in, elsize, axlab = utils.h5_load(h5path_in)\n h5file_lv, ds_lv, _, _ = utils.h5_load(h5path_lv)\n\n # open data for writing\n h5file_out, ds_out = utils.h5_write(None, ds_in.shape, ds_in.dtype,\n h5path_out,\n element_size_um=elsize,\n axislabels=axlab)\n\n ulabels = np.unique(ds_in)\n maxlabel = np.amax(ulabels)\n print(\"number of labels in watershed: {:d}\".format(maxlabel))\n\n fwmap = np.zeros(maxlabel + 1, dtype='i')\n\n areas_ws = np.bincount(ds_in.ravel())\n\n labelsets = {}\n rp_lw = regionprops(ds_lv, ds_in)\n for prop in rp_lw:\n\n maskedregion = prop.intensity_image[prop.image]\n counts = np.bincount(maskedregion)\n svoxs_in_label = [l for sl in np.argwhere(counts) for l in sl]\n\n 
ratios_svox_in_label = [float(counts[svox]) / float(areas_ws[svox])\n for svox in svoxs_in_label]\n fwmask = np.greater(ratios_svox_in_label, ratio_threshold)\n labelset = np.array(svoxs_in_label)[fwmask]\n labelsets[prop.label] = set(labelset) - set([0])\n\n basepath = h5path_in.split('.h5/')[0]\n utils.write_labelsets(labelsets, basepath + \"_svoxsets\",\n filetypes=['pickle'])\n\n ds_out[:] = utils.forward_map(np.array(fwmap), ds_in, labelsets)\n\n # close and return\n h5file_in.close()\n h5file_lv.close()\n try:\n h5file_out.close()\n except (ValueError, AttributeError):\n return ds_out", "def cut(self,cell):\r\n self.grid[cell[0]][cell[1]] = 1", "def o_wo_per_head(self):\n assert self.ff % self.heads == 0\n # fuse ff->e and projection layer of self-attention\n return (self.ff // (self.heads-self.padded_heads)) + self.qkv", "def ksh(i,t,htanses):\n for (zs,ys,zx,yx) in htanses[i]:\n alex.penup()\n alex.goto((zs%m)*20-10*m,(zs//m)*20-10*n)\n alex.pendown()\n alex.goto((ys%m+1)*20-10*m,(ys//m)*20-10*n)\n alex.goto((yx%m+1)*20-10*m,(yx//m+1)*20-10*n)\n alex.goto((zx%m)*20-10*m,(zx//m+1)*20-10*n)\n alex.goto((zs%m)*20-10*m,(zs//m)*20-10*n)\n alex.hideturtle()", "def create_teacher(self):\n\n #words = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n\n #print(\"self.length: \", self.length)\n for i, image in enumerate(self.images):\n if False:#i % 2 == 0:\n img = list(chunks(image, 10))\n plt.imshow(img, interpolation=\"nearest\", origin=\"upper\")\n plt.colorbar()\n plt.title(self.labels[i])\n plt.show()\n label = np.argmax(self.labels[i]) + 1\n label_vector = self.labels[i]\n timesteps = label + 1\n\n for chain in list(self.traces[i]):\n count_tensor = list()\n input_tensor = list()\n target_tensor = list()\n \n #count_tensor.append([0] * z_size)\n #input_tensor.append(image)\n #target_tensor.append([None, None])\n\n #count_padding = [0 for x in range(z_size)]\n \n for count, link in enumerate(chain):\n count_vector = [0 if x is not count else 1 for x in range(z_size)]\n count_tensor.append(count_vector)\n x, y = link\n image[y*200+x] = 255\n input_tensor.append(image)\n target_tensor.append(list(link))\n\n # Fill in the rest of the list with the same (current one doe\n #for cont in range(count + 1, z_size):\n for cont in range(count + 1, z_size + 1):\n #count_tensor.append(count_padding)\n count_tensor.append(count_vector)\n input_tensor.append(image)\n target_tensor.append(list(link))\n\n \n\n self.explode_lbls.append(label)\n self.explode_labels.append(label_vector)\n self.explode_counts.append(count_tensor)\n self.explode_images.append(input_tensor)\n self.explode_traces.append(target_tensor)\n #print(target_tensor)\n\n \n \n self.explode_length = len(self.explode_images)", "def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n 
self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + (self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n #######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and 
kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]", "def process_labels(ctx, tex, chapter):\n headings = ['chapter'] + ['sub'*i + 'section' for i in range(4)]\n reh = r'(' + '|'.join(headings) + r'){(.+?)}'\n environments = ['thm', 'lem', 'exc', 'figure', 'equation']\n ree = r'begin{(' + '|'.join(environments) + r')}'\n rel = r'(\\w+)label{(.+?)}'\n rel2 = r'label{(.+?)}'\n bigone = r'\\\\({})|\\\\({})|\\\\({})|\\\\(caption)|\\\\({})'.format(reh, ree, rel, rel2)\n rx = re.compile(bigone)\n\n sec_ctr = [chapter] + [0]*(len(headings))\n env_ctr = [0]*len(environments)\n blocks = catlist()\n lastlabel = None\n lastidx = 0\n m = rx.search(tex, lastidx)\n while m:\n blocks.append(tex[lastidx:m.start()])\n lastidx = m.start()\n cmd = next_command(tex, lastidx)\n lastidx = cmd.end\n if m.group(2):\n # This is a sectioning command (chapter, subsection,...)\n name = m.group(2)\n i = headings.index(name)\n if i == 0:\n env_ctr = [0]*len(env_ctr)\n sec_ctr[i:] = [sec_ctr[i]+1]+[0]*(len(headings)-i-1)\n number = \".\".join([str(x) for x in sec_ctr[:i+1]])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n blocks.append(\"<a id='{}'></a>\".format(idd))\n\n title = '{}&emsp;{}'.format(number, cmd.args[0])\n blocks.append(r'\\{}{{{}}}'.format(name, title))\n\n elif m.group(5):\n # This is an environment (thm, lem, ...)\n name = m.group(5)\n lastenv = name # save this for a caption command coming later...\n i = environments.index(name)\n env_ctr[i] += 1\n number = \"{}.{}\".format(sec_ctr[0], env_ctr[i])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n blocks.append(\"<a id='{}'></a>\".format(idd))\n\n if name in ctx.theoremlike_environments:\n nicename = ctx.named_entities[name]\n title = '{}&nbsp;{}'.format(nicename, number)\n blocks.append(r'\\begin{{{}}}[{}]'.format(name, title))\n else:\n blocks.append(r'\\begin{{{}}}'.format(name))\n\n elif m.group(6):\n # This is a labelling command (\\thmlabel, \\seclabel,...)\n label = \"{}:{}\".format(m.group(7), m.group(8))\n ctx.label_map[label] = (ctx.outputfile, lastlabel)\n\n elif m.group(9):\n # This is a caption command\n name = lastenv\n i = environments.index(name)\n number = \"{}.{}\".format(sec_ctr[0], env_ctr[i])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n nicename = ctx.named_entities[name]\n title = '<span class=\"title\">{}&nbsp;{}</span>'.format(nicename, number)\n text = '{}&emsp;{}'.format(title, cmd.args[0])\n blocks.append(r'\\caption{{{}}}'.format(text))\n\n elif m.group(10):\n # This is a \\label command, probably the target of a pageref\n idd = gen_unique_id()\n blocks.append(\"<a id={}></a>\".format(idd))\n ctx.label_map[m.group(11)] = (ctx.outputfile, idd)\n\n m = rx.search(tex, lastidx)\n blocks.append(tex[lastidx:])\n return \"\".join(blocks)", "def simple_core(block,cut,laser):\r\n\r\n\tlayers = 
int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\r\n\t#Since all cuts are square, the offsets are more obvious than in the general linear case.\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\tmax_delta = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * 2\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", \"0\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", str(block[\"thickness\"])])\r\n\r\n\tfor a in range(layers):\r\n\t\tx1, y1 = cut[\"final_dimension_x\"]/2 + a*taper, cut[\"final_dimension_y\"]/2 + a*taper\r\n\t\twhile abs(x1-cut[\"final_dimension_x\"]/2) < abs(max_delta):\r\n\t\t\tcutlist.append([\"jump\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tx1, y1 = x1 + laser[\"xy_spacing\"], y1 + laser[\"xy_spacing\"]\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\tmax_delta = max_delta - taper \r\n\treturn json.dumps(cutlist)", "def LabelDisks(self):\n pass", "def split2(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=False):\r\n minMarker = localMin(eccMap, cutStep)\r\n\r\n connectivity = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\r\n\r\n newLabel = sm.watershed(eccMap, minMarker, connectivity=connectivity, mask=self.array)\r\n\r\n border = ni.binary_dilation(self.array).astype(np.int8) - self.array\r\n\r\n for i in range(1, np.amax(newLabel) + 1):\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[newLabel == i] = 1\r\n currBorder = ni.binary_dilation(currArray).astype(np.int8) - currArray\r\n border = border + currBorder\r\n\r\n border[border > 1] = 1\r\n border = sm.skeletonize(border)\r\n\r\n if borderWidth > 1:\r\n border = ni.binary_dilation(border, iterations=borderWidth - 1).astype(np.int8)\r\n\r\n newPatchMap = ni.binary_dilation(self.array).astype(np.int8) * (-1 * (border - 1))\r\n\r\n labeledNewPatchMap, patchNum = ni.label(newPatchMap)\r\n\r\n # if patchNum != np.amax(newLabel):\r\n # print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)\r\n # raise ValueError, \"Number of patches after splitting does not equal to number of local minimum!\"\r\n\r\n newPatchDict = {}\r\n\r\n for j in range(1, patchNum + 1):\r\n\r\n currPatchName = patchName + '.' 
+ str(j)\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[labeledNewPatchMap == j] = 1\r\n currArray = currArray * self.array\r\n\r\n if np.sum(currArray[:]) > 0:\r\n newPatchDict.update({currPatchName: Patch(currArray, self.sign)})\r\n\r\n if isplot:\r\n plt.figure()\r\n plt.subplot(121)\r\n plt.imshow(self.array, interpolation='nearest')\r\n plt.title(patchName + ': before split')\r\n plt.subplot(122)\r\n plt.imshow(labeledNewPatchMap, interpolation='nearest')\r\n plt.title(patchName + ': after split')\r\n\r\n return newPatchDict", "def __init__(self, smoothing=0.1):\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing", "def hilfe(self):\n toto_hilfe(3)", "def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet", "def cmd_label_merged_boundaries(self,sun_dir,output_dir=None):\n sun=sunreader.SunReader(sun_dir)\n if output_dir is None:\n # defaults location of dense output\n output_dir=os.path.join(sun.datadir,'dwaq',\"global-dense\")\n \n hyd_fn=glob.glob(os.path.join(output_dir,'*.hyd'))[0]\n \n hydro=SunHydro(sun=sun,hyd_path=hyd_fn,flow_shps=[self.flows_shp])\n\n class SpliceScenario(waq_scenario.Scenario):\n base_path=output_dir\n name=\"spliced\"\n\n scen=SpliceScenario(hydro=hydro)\n\n self.log.info(\"Writing labels\")\n scen.hydro.write_boundary_links()", "def __init__(self, smoothing=0.0):\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing", "def __init__(self, smoothing=0.0):\n super(LabelSmoothing, self).__init__()\n self.confidence = 
1.0 - smoothing\n self.smoothing = smoothing", "def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. / 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. 
+ (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) ...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, 
ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. * np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. 
* np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd", "def _add_labels(self):\n coords = self['pore.coords']\n self['pore.front'] = coords[:,0]<(0.1*self._Lx)\n self['pore.back'] = coords[:,0]>(0.9*self._Lx)\n self['pore.left'] = coords[:,1]<(0.1*self._Ly)\n self['pore.right'] = coords[:,1]>(0.9*self._Ly)\n self['pore.bottom'] = coords[:,2]<(0.1*self._Lz)\n self['pore.top'] = coords[:,2]>(0.9*self._Lz)\n bnds = self.pores(labels=['front','back','left','right','bottom','top'])\n self['pore.boundary'] = False\n self['pore.boundary'] = bnds" ]
[ "0.5253033", "0.5244755", "0.5173087", "0.51096773", "0.5103869", "0.50852627", "0.5053167", "0.4967331", "0.49329308", "0.4927268", "0.4886447", "0.4883699", "0.48704627", "0.48665786", "0.48657504", "0.48568624", "0.484965", "0.48423848", "0.48153552", "0.4808797", "0.47966635", "0.47905484", "0.47897264", "0.4789602", "0.47883236", "0.4778537", "0.47747788", "0.47747788", "0.47688013", "0.47625986" ]
0.5767312
0
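The `_make_ht_label` document in the record above leans on named-group regexes to decompose scenario tokens into a lower bound, a key, and an upper bound. A hedged, standalone sketch of that decomposition follows; the sample tokens '500ht' and '0eta320' are illustrative assumptions, not values drawn from this dataset.

import re

# Same pattern shapes as in _make_ht_label: optional low/high bounds around a key.
ht_re = re.compile(r'^(?P<lo>\d*)(?P<key>ht)(?P<hi>\d*)$')
eta_re = re.compile(r'^(?P<lo>\d*)(?P<key>eta)(?P<hi>\d*)$')

print(ht_re.match('500ht').groupdict())     # {'lo': '500', 'key': 'ht', 'hi': ''}
print(eta_re.match('0eta320').groupdict())  # {'lo': '0', 'key': 'eta', 'hi': '320'}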
make test label for combinations helper with two simple children.
def _make_combinationsTest_label(chain_parts):
    assert len(chain_parts) == 1
    scenario = chain_parts[0]['hypoScenario']
    assert scenario == 'combinationsTest'

    return """
    combgen(
        [(2)(20et, 0eta320)]
        simple([(40et, 0eta320) (50et, 0eta320)])
        simple([(35et, 0eta240) (55et, 0eta240)])
    )"""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sums():\n assert label_parent(1, 2) == 3\n assert label_parent (1, 4) == 8\n # Should ignore arg order\n assert label_parent(4, 1) == 8", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def test_recipe_nutrition_label_widget(self):\n pass", "def test_select_label(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(title=\"Test Section 1\", story=story, layout=layout)\n section2 = create_section(title=\"Test Section 2\", story=story, layout=layout)\n form = SectionRelationAdminForm()\n choices_list = list(form.fields['parent'].widget.choices)\n self.assertIn(story.title, choices_list[1][1])\n self.assertIn(story.title, choices_list[2][1])", "def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 
'lowerBound': 0,\n 'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko4_4},\n\n ]", "def test_labels(self):\n self.compliance_tester.test_labels(self.oi)", "def test_parent_label(self):\n l = self.d.label(1)\n l2 = self.d.label(31405)\n\n self.assertTrue(l.parent_label is None)\n self.assertTrue(l2 in l.sublabels)\n self.assertEqual(l2.parent_label, l)", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, 
None)\n _G_label_3\n \"\"\"))", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def test_general_subset_level():\n pass", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def _generateLabelAndName(self, obj, **args):\n result = []\n label = self._generateLabel(obj, **args)\n name = self._generateName(obj, **args)\n result.extend(label)\n if not len(label):\n result.extend(name)\n elif len(name) and name[0].strip() != label[0].strip():\n result.extend(name)\n return result", "def tests_ti_document_add_label(self):\n super().group_add_label()", "def getLabel2(*args):", "def getLabel2(*args):", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def _generateTableCell2ChildLabel(self, obj, **args):\n result = []\n\n # If this table cell has 2 children and one of them has a\n # 'toggle' action and the other does not, then present this\n # as a checkbox where:\n # 1) we get the checked state from the cell with the 'toggle' action\n # 2) we get the label from the other cell.\n # See Orca bug #376015 for more details.\n #\n if obj.childCount == 2:\n cellOrder = []\n hasToggle = [False, False]\n for i, child in enumerate(obj):\n if self._script.utilities.hasMeaningfulToggleAction(child):\n hasToggle[i] = True\n break\n if hasToggle[0] and not hasToggle[1]:\n cellOrder = [ 1, 0 ]\n elif not hasToggle[0] and hasToggle[1]:\n cellOrder = [ 0, 1 ]\n if cellOrder:\n for i in cellOrder:\n if not hasToggle[i]:\n result.extend(self.generate(obj[i], **args))\n return result", "def test_process_label_in_node(self):\n tree = Node(children=[\n Node(\"Defining secret phrase.\", label=['AB', 'a']),\n Node(\"Has secret phrase. 
Then some other content\", \n label=['AB', 'b'])\n ], label=['AB'])\n t = Terms(tree)\n t.scoped_terms = {\n ('AB',): [Ref(\"secret phrase\", \"AB-a\", (9,22))]\n }\n # Term is defined in the first child\n self.assertEqual([], t.process(tree.children[0]))\n self.assertEqual(1, len(t.process(tree.children[1])))", "def get_extra_label(self, label_name: str, hierarchy: List[str]) -> Any:", "def test_get_scenarios_expanded(self):\n pass", "def test_nested():\n res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.root.metadata.choose(lambda m: (m[\"name\"], m.uid))))\n assert \"type\" in res # from conditions\n assert \"reason\" in res # from conditions\n assert \"name\" in res # from metadata\n assert \"uid\" in res # from metadata", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def test_nested_sequence(self):\n\n self.taxon_tester('Apis mellifera')\n self.taxon_tester('Apis')\n self.taxon_tester('Apini')\n self.taxon_tester('Apinae')\n # Apidae at 5680 species is a struggle\n self.taxon_tester('Apidae')\n if False:\n # Apoidea: 19566 takes 223 seconds\n self.taxon_tester('Apoidea')\n # Aculeata fails after 339 seconds\n self.taxon_tester('Aculeata')\n self.taxon_tester('Apocrita')\n self.taxon_tester('Hymenoptera')\n self.taxon_tester('Endopterygota')\n self.taxon_tester('Neoptera')\n self.taxon_tester('Pterygota')\n self.taxon_tester('Dicondylia')\n self.taxon_tester('Insecta')\n self.taxon_tester('Hexapoda')\n self.taxon_tester('Pancrustacea')\n self.taxon_tester('Mandibulata')\n self.taxon_tester('Arthropoda')\n self.taxon_tester('Panarthropoda')\n self.taxon_tester('Ecdysozoa')\n self.taxon_tester('Protostomia')\n self.taxon_tester('Bilateria')\n self.taxon_tester('Eumetazoa')\n self.taxon_tester('Metazoa')\n self.taxon_tester('Holozoa')\n self.taxon_tester('Opisthokonta')\n self.taxon_tester('Eukaryota')", "def test_bootstrap_support_labeled(self):\r\n master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')\r\n \"\"\"\r\n /-------.5 /-a\r\n ---1| \\-b\r\n \\------.5 /-c\r\n \\-d\r\n \"\"\"\r\n t1 = parse_newick('((a:6,b:8.2)hi:2,(c:1,d:2):7);') # same structure\r\n t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings\r\n new_master, bootstraps = tc.bootstrap_support(master_tree, [t1, t2])\r\n expected = dict([('ab', .5), ('cd', .5), ('rt', 1.0)])\r\n self.assertDictEqual(bootstraps, expected)", "def test_title(names):", "def test_verbose_name_group(self): \n field_verboses = {\n \"title\": \"Название группы\",\n \"slug\": \"Слаг\",\n \"description\": \"Описание группы\",\n }\n for value, expected in field_verboses.items():\n with self.subTest(value=value):\n self.assertEqual(self.group._meta.get_field(value).verbose_name, expected)", "def plugin_second_label():\n return \"second\"", 
"def test_product_labels(self):\n\n prd = Product.objects.get(id=1)\n # label name\n label_name = prd._meta.get_field('name').verbose_name\n self.assertEqual(label_name, 'name')\n # label description\n label_name = prd._meta.get_field('description').verbose_name\n self.assertEqual(label_name, 'description')\n # label nutrition_grade\n label_name = prd._meta.get_field('nutrition_grade').name\n self.assertEqual(label_name, 'nutrition_grade')\n # label barcode\n label_name = prd._meta.get_field('barcode').verbose_name\n self.assertEqual(label_name, 'barcode')\n # label url\n label_name = prd._meta.get_field('url').verbose_name\n self.assertEqual(label_name, 'url')\n # label url_pic\n label_name = prd._meta.get_field('url_pic').name\n self.assertEqual(label_name, 'url_pic')\n # label store\n label_name = prd._meta.get_field('store').verbose_name\n self.assertEqual(label_name, 'store')\n # label prd_cat\n label_name = prd._meta.get_field('prd_cat').name\n self.assertEqual(label_name, 'prd_cat')\n # label fat\n label_name = prd._meta.get_field('fat').verbose_name\n self.assertEqual(label_name, 'fat')\n # label saturated_fat\n label_name = prd._meta.get_field('saturated_fat').name\n self.assertEqual(label_name, 'saturated_fat')\n # label sugar\n label_name = prd._meta.get_field('sugar').verbose_name\n self.assertEqual(label_name, 'sugar')\n # label salt\n label_name = prd._meta.get_field('salt').verbose_name\n self.assertEqual(label_name, 'salt')", "def is_test(self):\r\n return self.has_label('tests')" ]
[ "0.6137433", "0.6108757", "0.60195917", "0.5986544", "0.5936767", "0.58589", "0.5759686", "0.56950617", "0.5624841", "0.55902916", "0.55750483", "0.5560212", "0.5554626", "0.55542696", "0.5553175", "0.5553175", "0.55467266", "0.5503819", "0.5457172", "0.5451371", "0.54313546", "0.53999746", "0.5376586", "0.5375577", "0.5364844", "0.5359766", "0.535864", "0.5338235", "0.53258616", "0.53186446" ]
0.6972838
0
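A minimal usage sketch for the `_make_combinationsTest_label` document in the record above; the single-entry `chain_parts` list carrying only the `hypoScenario` key is an assumption about the smallest input the function accepts (it reads no other keys).

# Hypothetical minimal input: one chain part whose scenario names the test label.
chain_parts = [{'hypoScenario': 'combinationsTest'}]
label = _make_combinationsTest_label(chain_parts)
print(label)  # prints the combgen(...) label string built by the function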
(Set, float, float, int, str) -> list Filters a set of Products according to the parameters. This function is responsible for determining whether filtering with tags should be applied or not.
def get_matching_products(products, lat, lng, radius, tags):
    if tags:
        tag_list = tags.split(',')
        return list([
            product for product in products
            if is_matching_product_with_tags(
                product, lat, lng, radius, tag_list
            )
        ])
    else:
        return list([
            product for product in products
            if is_matching_product(
                product, lat, lng, radius
            )
        ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_generic(products, listings, result=None):\n print \"Apply Generic Filtering \"\n if result == None:\n result = {}\n matched_listings = []\n for alist in listings:\n manufacturer, renamed_manufacturer = find_manufacturer(products, alist)\n if manufacturer == False:\n continue\n for product in products[manufacturer]:\n product = product[0] # get product information all in lower case\n if not does_list_contains_model(\\\n alist, product['model'], manufacturer):\n continue\n if product['product_name'] not in result:\n result[product['product_name']] = []\n for matched_list in listings[alist]:\n matched_manufacturer =\\\n matched_list['manufacturer'].lower()\n if manufacturer not in matched_manufacturer and\\\n matched_manufacturer not in renamed_manufacturer:\n continue\n result[product['product_name']].append(matched_list)\n matched_listings.append(alist)\n remove_matched_list(listings, matched_listings)\n length_listings(listings)\n return result", "def filter_queryset(self, queryset):\n query_params = self.request.query_params\n # validate query parameters\n exception_response = ParamsCheck.validate(\n query_params, APIParams.products_list_params\n )\n if exception_response:\n return exception_response\n\n products_qs = self.get_queryset() # all\n\n category = query_params.get(\"category\", None)\n exclude_ingredients = query_params.get(\"exclude_ingredient\", None)\n exclude_ingredients = self._clean_string(exclude_ingredients)\n include_ingredients = query_params.get(\"include_ingredient\", None)\n include_ingredients = self._clean_string(include_ingredients)\n\n # filtering part\n if category is not None:\n products_qs = products_qs.filter(category=category)\n for each in include_ingredients:\n products_qs = products_qs.filter(ingredients__name=each)\n for each in exclude_ingredients:\n products_qs = products_qs.exclude(ingredients__name=each)\n\n return products_qs", "def filter_products(self, products, mrp_only=False, extension=None, **filters):\n\n filterMask = np.full(len(products), True, dtype=bool)\n\n # Applying the special filters (mrp_only and extension)\n if mrp_only:\n filterMask &= (products['productGroupDescription'] == \"Minimum Recommended Products\")\n\n if extension:\n mask = np.full(len(products), False, dtype=bool)\n for elt in extension:\n mask |= [False if isinstance(x, np.ma.core.MaskedConstant) else x.endswith(elt)\n for x in products[\"productFilename\"]]\n filterMask &= mask\n\n # Applying the rest of the filters\n for colname, vals in filters.items():\n\n if type(vals) == str:\n vals = [vals]\n\n mask = np.full(len(products), False, dtype=bool)\n for elt in vals:\n mask |= (products[colname] == elt)\n\n filterMask &= mask\n\n return products[np.where(filterMask)]", "def filter(self, filters):", "def test_tag_filter(self):\n request = RequestFactory().get('/?tags=foo&tags=bar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters['tags__slug__in'], ['foo', 'bar'])", "def filter_queryset(self, queryset):\n tags = self.request.GET.getlist(\"tag\")\n if tags:\n for tag in tags:\n queryset = queryset.filter(tag__tag=tag)\n return super().filter_queryset(queryset)", "def filter_by_tag(self, tags):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n data_collection = DataCollection()\n for item in self.iteritems():\n if tags == [] or tags == None or all([tag in item.tags for tag in tags]):\n 
data_collection.add_data(item)\n return data_collection", "def filter_queryset(self, request, queryset, view):\n # filter by tags if available.\n tags = request.query_params.get(\"tags\", None)\n\n if tags and isinstance(tags, six.string_types):\n tags = tags.split(\",\")\n return queryset.filter(tags__name__in=tags)\n\n return queryset", "def test_filter_remove_only_bad_products(self):\n list_of_products = [self.good_product, self.bad_product]\n self.assertEqual(\n ProductValidator().filter(list_of_products),\n [self.good_product])", "def apply(self, catalog):\n out = []\n for product in catalog:\n valid = True\n for key, filtr in self.filters.items():\n valid = valid and filtr(product[key])\n if valid:\n out.append(product)\n out = sorted(out, key=lambda x: x['onDemandUsdPrice'])\n return self.limit_size(out)", "def check_intersection(product_list, product):\n best_products = []\n if len(product_list) > 0:\n\n for current_product in product_list:\n intersection_product = set(current_product.categories.all()) & set(\n product.categories.all())\n\n if len(intersection_product) >= 1:\n best_products.append(current_product)\n\n return best_products", "def filter_category_products(products, searchterm='', for_sale=None, **kwargs):\n\n searchterm = searchterm.lower()\n filtered_products = []\n for product in products:\n if searchterm not in product['name'].lower():\n continue\n if for_sale and not product['records']:\n continue\n filtered_products.append(product)\n return filtered_products", "def test_set_tag_filters(self):\n filters = QueryFilterCollection()\n\n url = \"?\"\n query_params = self.mocked_query_params(url, OCPTagView)\n handler = OCPTagQueryHandler(query_params)\n tag_keys = handler.get_tag_keys(filters=False)\n\n filter_key = tag_keys[0]\n\n filter_value = \"filter\"\n group_by_key = tag_keys[1]\n\n group_by_value = \"group_By\"\n\n url = f\"?filter[tag:{filter_key}]={filter_value}&group_by[tag:{group_by_key}]={group_by_value}\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n filters = handler._set_tag_filters(filters)\n\n expected = f\"\"\"<class 'api.query_filter.QueryFilterCollection'>: (AND: ('pod_labels__{filter_key}__icontains', '{filter_value}')), (AND: ('pod_labels__{group_by_key}__icontains', '{group_by_value}')), \"\"\" # noqa: E501\n\n self.assertEqual(repr(filters), expected)", "def clean(self, products):\n clean_products = []\n clean_categories = set()\n for product in products:\n if self.is_valid(product):\n product[\"categories\"] = [\n cat.strip().lower().capitalize()\n for cat in product[\"categories\"].split(\",\")\n ]\n clean_products.append(product)\n clean_categories |= set(product[\"categories\"])\n return clean_categories, clean_products", "def test_tag_filter(self):\n request = RequestFactory().get('/?search=foobar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters['name__icontains'], 'foobar')\n self.assertEquals(filter.qs.filters['status__startswith'], 'foobar')", "def filter(\n self, items: Iterable[Product], spec: Specification\n ) -> Generator[Product, None, None]:\n return (item for item in items if spec.is_satisfied(item))", "def _fuzzy_products(self, package: ImagePackage) -> typing.List[str]:\n\n products = {package.name}\n # TODO: add the generic product generation code (including nomatch exclusions here)\n return list(products)", "def call(self):\r\n clean_products = []\r\n\r\n for category in CATEGORIES:\r\n print(f\"Chargement 
des produits de type {category}\")\r\n api_url = SEARCH_API_URL + \\\r\n (f\"?search_terms={category}\"\r\n \"&search_tag=category&sort_by=unique_scans_n\"\r\n \"&page_size=1000&json=1\")\r\n json_response = requests.get(api_url).json()\r\n products = json_response[\"products\"]\r\n\r\n for product in products:\r\n clean_product = {\r\n k: v for k, v in product.items()\r\n if k in FIELD_NEEDED and v != ''}\r\n clean_products.append(clean_product)\r\n\r\n return clean_products", "def filter(self, filters:list)->list:\n for item in self.list:\n use_item = True\n for filter in filters:\n filter_key, filter_value, filter_type = filter\n if filter_type == \"<\" and item[filter_key] >= filter_value:\n use_item = False\n break\n elif filter_type == \">\" and item[filter_key] <= filter_value:\n use_item = False\n break\n elif filter_type == \"<=\" and item[filter_key] > filter_value:\n use_item = False\n break\n elif filter_type == \">=\" and item[filter_key] < filter_value:\n use_item = False\n break\n elif filter_type == \"=\" and not item[filter_key] == filter_value:\n use_item = False\n break\n if use_item:\n yield item", "def filter(self, artifacts: ArtifactsList) -> ArtifactsList:\n print(self.my_param)\n return artifacts", "def filter_products(self, products: list):\n # Get all products that has no parent\n independent = [product for product in products if product[\"parent_id\"] is None]\n # Get all products that has parent\n dependent = [\n product for product in products if product[\"parent_id\"] is not None\n ]\n # Sort dependent products by parent_id, so that a child will be always\n # inserted after the parent\n dependent = sorted(dependent, key=lambda item: item[\"parent_id\"])\n # Saves the total of objects\n self.total = len(independent) + len(dependent)\n return independent, dependent", "def filter_by_color(self, products, color):\n for p in products:\n # Selects products of the specified color\n if p.color == color:\n yield p", "def get_all_products(request, *args, query_str=''):\n\n active_filters = []\n products = Product.objects.all()\n product_fields = (\n (\"size\", \"options\"),\n (\"price\", \"range\"),\n (\"colours\", \"options\"),\n (\"year\", \"range\"),\n (\"collection\", \"equals\")\n )\n field_ranges = []\n for field, filter_type in product_fields:\n if filter_type == \"range\":\n (min_val) = products.filter().values_list(field).order_by(field)[0]\n (max_val) = products.filter().values_list(field).order_by\\\n (f'-{field}')[0]\n obj = {}\n obj['min_val'] = int(min_val[0])\n obj['max_val'] = int(max_val[0])\n obj['field'] = field\n field_ranges.append(obj)\n\n # if filter_type == \"options\":\n\n\n\n if request.GET:\n for key in request.GET:\n if \"__range\" in key:\n val = request.GET.getlist(key)\n val[:] = [int(x) for x in val]\n active_filters.append(\n [key.split(\"__\")[0], key.split(\"__\")[1], val]\n )\n obj = {}\n obj[key] = val\n query = Q(**obj)\n products = products.filter(query)\n\n\n if 'collection' in request.GET:\n collection_pk = request.GET['collection']\n if not collection_pk or not collection_pk.isnumeric():\n if query:\n return redirect(\n reverse('products'),\n kwargs={'query_str': query}\n )\n else:\n return redirect(reverse('products'))\n\n products = products.filter(collection=collection_pk)\n\n if 'q' in request.GET:\n query = request.GET['q']\n query_str = query\n if not query:\n return redirect(reverse('products'))\n\n queries = Q(display_name__icontains=query) | \\\n Q(name__icontains=query)\n products = products.filter(queries)\n\n\n 
context = {\n 'products': products,\n 'MEDIA_URL': settings.MEDIA_URL,\n 'search_term': query_str,\n 'filters': product_fields,\n 'field_ranges': field_ranges,\n 'active_filters': active_filters\n }\n\n return render(request, 'products/products.html', context)", "def _apply_filters(self, metadata):\n if \"keywords\" in self.filters:\n if not metadata.keywords:\n return False\n if not all(keyword in metadata.keywords for keyword in self.filters[\"keywords\"]):\n return False\n if \"features\" in self.filters:\n if not metadata.features:\n return False\n if not all(feature in metadata.features for feature in self.filters[\"features\"]):\n return False\n if \"authors\" in self.filters:\n if not metadata.authors:\n return False\n if not all(author in metadata.authors for author in self.filters[\"authors\"]):\n return False\n if \"version\" in self.filters:\n if not metadata.pylith_version:\n return False\n for verMeta in metadata.pylith_version:\n if not eval(\"{ver} {verMeta}\".format(ver=self.filters[\"version\"], verMeta=verMeta)):\n return False\n return True", "def filter(self, *args, **kwargs):", "def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y", "def type_filter(self, items, types=None):", "def extract(self, filter_by='relevance', all_pages=False, limit=None):\r\n\r\n page = 1\r\n total_of_pages = 1\r\n products_list = []\r\n\r\n while page <= total_of_pages:\r\n\r\n products_code = None\r\n while products_code is None:\r\n soup = self.__olx_requests(filter_by, page)\r\n products_code = soup.find('div', {'class': \"sc-1fcmfeb-0 WQhDk\"})\r\n\r\n if page == 1 and all_pages is True and limit is None:\r\n max_pages = self.__number_of_pages(soup)\r\n if max_pages is not None:\r\n total_of_pages = max_pages\r\n\r\n elif page == 1 and all_pages is False and isinstance(limit, int):\r\n max_pages = self.__number_of_pages(soup)\r\n if max_pages is not None and limit <= max_pages:\r\n total_of_pages = limit\r\n else:\r\n total_of_pages = 1\r\n\r\n # Individual product - TAG\r\n for tags_products in [\"sc-1fcmfeb-2 ggOGTJ\", \"sc-1fcmfeb-2 hFOgZc\"]:\r\n for each_product in products_code.findAll('li', {'class': tags_products}):\r\n\r\n # Each product dictionary\r\n product_dict = {}\r\n\r\n # Name\r\n PRODUCT_NAME_TAG = 'fnmrjs-8 kRlFBv'\r\n if each_product.find('div', {'class': PRODUCT_NAME_TAG}) is not None:\r\n product_name = each_product.find('div', {'class': PRODUCT_NAME_TAG}).text\r\n if 'Anunciante online' in product_name:\r\n product_name = product_name.replace('Anunciante online', '')\r\n product_dict['Name'] = product_name\r\n else:\r\n continue\r\n\r\n # ID\r\n product_id = each_product.find('a', {'data-lurker-detail': 'list_id'})['data-lurker_list_id']\r\n product_dict['ID'] = product_id\r\n\r\n # Image\r\n PRODUCT_IMAGE_TAG = 'fnmrjs-5 jksoiN'\r\n product_img = each_product.find('div', {'class': PRODUCT_IMAGE_TAG}).find('img')['src']\r\n product_dict['Image'] = product_img\r\n\r\n # Price\r\n PRODUCT_PRICE_TAG = 'fnmrjs-15 clbSMi'\r\n if each_product.find('div', {'class': PRODUCT_PRICE_TAG}).text:\r\n product_price = each_product.find('div', {'class': PRODUCT_PRICE_TAG}).text\r\n product_dict['Price'] = re.findall('R\\$ (\\d*,?\\.?\\d*)|$', product_price)[0].replace('.', '')\r\n else:\r\n product_dict['Price'] = '-'\r\n\r\n # Date\r\n PRODUCT_DATE_TAG = 'fnmrjs-18 gMKELN'\r\n product_date = each_product.find('div', {'class': PRODUCT_DATE_TAG}).text\r\n if 'Hoje' in 
product_date:\r\n product_date = product_date.replace('Hoje', datetime.date.today().strftime(\"%d/%m \"))\r\n product_dict['Date'] = product_date\r\n\r\n elif 'Ontem' in product_date:\r\n product_date = product_date.replace('Ontem', (datetime.date.today() - datetime.timedelta(days=1)).strftime(\"%d/%m \"))\r\n product_dict['Date'] = product_date\r\n\r\n else:\r\n product_date = product_date.replace(re.findall(' ([a-z]*)\\d*', product_date)[0], months[re.findall(' ([a-z]*)\\d*', product_date)[0]]).replace(' ', r'/', 1)\r\n product_dict['Date'] = product_date\r\n\r\n # Location\r\n PRODUCT_LOCATION_PARENT_TAG = 'fnmrjs-21 bktOWr'\r\n PRODUCT_LOCATION_CHILD_TAG = 'fnmrjs-13 hdwqVC'\r\n product_location = each_product.find('div', {'class': PRODUCT_LOCATION_PARENT_TAG}).find('p', {'class': PRODUCT_LOCATION_CHILD_TAG}).text\r\n product_dict['City'] = re.findall('(.*\\w*),|$', product_location)[0]\r\n product_dict['Neighborhood'] = re.findall(r',(.*\\w*) - |$', product_location)[0].strip()\r\n product_dict['State'] = states_ddd[re.findall(r'DDD (\\d*)|$', product_location)[0]]\r\n\r\n # Link\r\n product_link = each_product.find('a', {'data-lurker-detail': 'list_id'})['href']\r\n product_dict['Link'] = product_link\r\n\r\n # List of Products\r\n products_list.append(product_dict)\r\n\r\n # Next page\r\n page += 1\r\n\r\n return products_list", "def order_filter(self,elements):", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n ingredient = self.request.query_params.get('ingredient')\n queryset = self.queryset\n if tags:\n tags_id = self._params_to_int(tags)\n queryset = queryset.filter(tags__id__in =tags_id)\n if ingredient:\n ingredient_id = self._params_to_int(ingredient)\n queryset = queryset.filter(ingredient__id__in = ingredient_id)\n\n return queryset.filter(user = self.request.user)" ]
[ "0.66341794", "0.6542159", "0.63417774", "0.6290791", "0.61444324", "0.61051065", "0.5993559", "0.5917431", "0.5875063", "0.5852951", "0.578841", "0.5734029", "0.57259727", "0.5694228", "0.5673258", "0.565935", "0.5637606", "0.56313235", "0.5576161", "0.5563928", "0.5539448", "0.552675", "0.5516025", "0.5515226", "0.5494058", "0.5465211", "0.5446835", "0.5442008", "0.5441432", "0.54033357" ]
0.6570375
1
(Product, float, float, radius) -> boolean Check whether a shop's coordinates are within a radius (in meters) of the given point, using Vincenty's formula.
def is_matching_product(product, lat, lng, radius): return vincenty( (lat, lng), (product.shop.lat, product.shop.lng) ).meters <= radius
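A minimal sketch of the radius filter above, assuming made-up Shop/Product tuples and sample coordinates; geopy's geodesic distance is used here, since recent geopy releases dropped the vincenty helper:

# Sketch only: Shop/Product fields and all coordinates below are assumptions for illustration.
from collections import namedtuple
from geopy.distance import geodesic  # newer geopy API; older releases exposed vincenty

Shop = namedtuple("Shop", ["lat", "lng", "tags"])
Product = namedtuple("Product", ["name", "shop"])

def within_radius(product, lat, lng, radius_m):
    # Ellipsoidal (Vincenty-style) distance in meters between the query point and the shop.
    return geodesic((lat, lng), (product.shop.lat, product.shop.lng)).meters <= radius_m

products = [
    Product("espresso", Shop(52.3702, 4.8952, ["coffee"])),
    Product("paperback", Shop(48.8566, 2.3522, ["books"])),
]
print([p.name for p in products if within_radius(p, 52.37, 4.89, 5000)])  # ['espresso']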
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_matching_product_with_tags(product, lat, lng, radius, tags):\n return vincenty(\n (lat, lng),\n (product.shop.lat, product.shop.lng)\n ).meters <= radius and any(tag in product.shop.tags for tag in tags)", "def __contains__(self, position):\n return sum([(c1 - c2) ** 2 for (c1, c2) in zip(self.position, position)]) <= self.radius", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False", "def objects_radius(self, centre, radius):", "def checkBounds(x,y,z,center,radius):\n r2 = (x-center[0])**2 + (y-center[1])**2# + (z-center[0])**2\n if r2 < radius**2:\n return True\n else:\n return False", "def contains(self, loc): \n return loc.distance(self.center) <= self.radius", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def is_point_inside_hypersphere(point: np.array, c: List[float], r: float) -> bool:\n return np.linalg.norm(point - c) < r", "def inside(x, y, primitive):\n\n # You should implement your inside test here for all shapes\n # for now, it only returns a false test\n\n if primitive[\"shape\"] == \"circle\":\n dist_sqr = ((primitive[\"center\"][0] - x) ** 2 +\n (primitive[\"center\"][1] - y) ** 2)\n\n return dist_sqr <= primitive[\"radius\"] ** 2\n else:\n return winding_number(x, y, primitive)\n\n return False", "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()", "def sphere_isclose(c1, c2, *args, **kwargs):\n return np.isclose(c1.radius, c2.radius, *args, **kwargs) and np.allclose(\n c1.center, c2.center, *args, **kwargs\n )", "def containsPos(self, obst_pos, aerial_pos):\n dist_to_center = obst_pos.distanceTo(aerial_pos)\n return dist_to_center <= self.sphere_radius", "def check_point_in_detector(p, radius=radius, height=height, distance=distance):\r\n if p[0]**2 + p[1]**2 <= radius**2: # Are the x and y coordinates in the circle?\r\n if (p[2] >= distance) and (p[2] <= height+distance): # Is the z coordinate between the distance and the height?\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance", "def incircle(self,xpos,ypos,cellx,celly):\n xcell, ycell = self.getcellcenter(cellx,celly)\n if ((xpos - xcell)**2 + (ypos - ycell)**2) < self.crad2:\n return True\n return False\n\n return cellx, celly", "def WhereAreYou(CurLongitude,CurLatitude,LocationLongitude,LocationLatitude,LocationRadius):\n # Calculate the 
great circle distance between two points\n # on the earth (specified in decimal degrees)\n # 将十进制度数转化为弧度\n CurLongitude,CurLatitude,LocationLongitude,LocationLatitude = map(radians, [float(CurLongitude),float(CurLatitude),float(LocationLongitude),float(LocationLatitude)])\n\n # haversine公式\n dlon = LocationLongitude - CurLongitude\n dlat = LocationLatitude - CurLatitude\n a = sin(dlat / 2) ** 2 + cos(CurLatitude) * cos(LocationLatitude) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n distance = c * r * 1000\n if(distance < float(LocationRadius)):\n return True\n else:\n return False", "def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval", "def get_radius(self):", "def within_radius(self, radius=5.0):\n\n return GeoEntry.within_radius(self.primary_geocode, radius, ['all',])", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def is_point_inside_hypermoon(point: np.array, c: Tuple[List[float]], r: Tuple[float]) -> bool:\n return is_point_inside_hypersphere(point, c[0], r[0]) and not is_point_inside_hypersphere(point, c[1], r[1])", "def vincenty(p1, p2):\n # Note: GeoPy expects (latitude, longitude) pairs.\n return geopy.distance.vincenty(\n (p1.y, p1.x),\n (p2.y, p2.x)\n ).miles", "def check_coordinates(X, Y):\n\n # Accounting for elliptical Jupiter disk\n Y *= 1.071374\n\n return sqrt(X ** 2 + Y ** 2)", "def is_clicked(vtx_x, vtx_y, mouse_x, mouse_y, radius):\n return math.sqrt(((mouse_x - vtx_x) ** 2) + ((mouse_y - vtx_y) ** 2)) < radius", "def is_point_inside_hypercube(point: List[float], c: List[float], r: float) -> bool:\n diff = np.subtract(point, c)\n return np.all(np.absolute(diff) <= r)" ]
[ "0.64729494", "0.59081954", "0.5859355", "0.58463377", "0.5835103", "0.5793143", "0.5749297", "0.5747117", "0.571432", "0.5694992", "0.5625121", "0.55783933", "0.5532883", "0.5531398", "0.55265725", "0.5519289", "0.54891366", "0.5458684", "0.54542017", "0.5451329", "0.53815925", "0.5368734", "0.53533536", "0.5344653", "0.5337585", "0.5330999", "0.53292596", "0.5327822", "0.5281993", "0.527632" ]
0.7570688
0
(Product, float, float, radius, list) -> boolean Check whether a shop's coordinates are within a radius (in meters) of the given point, using Vincenty's formula, and whether the shop contains any of the tags provided.
def is_matching_product_with_tags(product, lat, lng, radius, tags): return vincenty( (lat, lng), (product.shop.lat, product.shop.lng) ).meters <= radius and any(tag in product.shop.tags for tag in tags)
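A minimal follow-up sketch, assuming the same hypothetical Shop/Product tuples and the within_radius helper from the previous sketch; the tag constraint is layered on with a short any() test:

def within_radius_with_tags(product, lat, lng, radius_m, tags):
    # any() short-circuits as soon as one requested tag appears in the shop's tags.
    return (within_radius(product, lat, lng, radius_m)
            and any(tag in product.shop.tags for tag in tags))

print(within_radius_with_tags(products[0], 52.37, 4.89, 5000, ["coffee", "tea"]))  # True
print(within_radius_with_tags(products[0], 52.37, 4.89, 5000, ["hardware"]))  # False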
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_matching_product(product, lat, lng, radius):\n return vincenty(\n (lat, lng),\n (product.shop.lat, product.shop.lng)\n ).meters <= radius", "def shops_within_radius(self, lat, lng, radius, tags=None):\n center_point = geoindex.GeoPoint(lat, lng)\n points = self.geoindex.get_nearest_points(center_point, radius, 'km')\n\n def tags_filter(shops):\n for shop in shops:\n for tag in tags:\n if tag in shop['tags']:\n yield shop\n break\n\n def get_shops():\n for point, distance in points:\n point.ref['distance'] = distance\n yield point.ref\n\n if tags:\n return tags_filter(get_shops())\n else:\n return get_shops()", "def __contains__(self, position):\n return sum([(c1 - c2) ** 2 for (c1, c2) in zip(self.position, position)]) <= self.radius", "def get_matching_products(products, lat, lng, radius, tags):\n if tags:\n tag_list = tags.split(',')\n return list([\n product for product in products\n if is_matching_product_with_tags(\n product,\n lat,\n lng,\n radius,\n tag_list\n )\n ])\n else:\n return list([\n product for product in products\n if is_matching_product(\n product,\n lat,\n lng,\n radius\n )\n ])", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def objects_radius(self, centre, radius):", "def contains(self, loc): \n return loc.distance(self.center) <= self.radius", "def FindPointsWithinRadius(self, p_float, , vtkIdList):\n ...", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def containsPos(self, obst_pos, aerial_pos):\n dist_to_center = obst_pos.distanceTo(aerial_pos)\n return dist_to_center <= self.sphere_radius", "def checkBounds(x,y,z,center,radius):\n r2 = (x-center[0])**2 + (y-center[1])**2# + (z-center[0])**2\n if r2 < radius**2:\n return True\n else:\n return False", "def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False", "def check_point_in_detector(p, radius=radius, height=height, distance=distance):\r\n if p[0]**2 + p[1]**2 <= radius**2: # Are the x and y coordinates in the circle?\r\n if (p[2] >= distance) and (p[2] <= height+distance): # Is the z coordinate between the distance and the height?\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def inside(x, y, primitive):\n\n # You should implement your inside test here for all shapes\n # for now, it only returns a false test\n\n if primitive[\"shape\"] == \"circle\":\n dist_sqr = ((primitive[\"center\"][0] - x) ** 2 +\n (primitive[\"center\"][1] - y) ** 2)\n\n return dist_sqr <= primitive[\"radius\"] ** 2\n else:\n return winding_number(x, y, primitive)\n\n return False", "def is_point_inside_hypersphere(point: np.array, c: List[float], r: float) -> bool:\n return np.linalg.norm(point - c) < r", "def _intersected(positions, radius):\n P1 = positions[0]\n P2 = positions[1]\n P3 = positions[2]\n temp1 = P2 - P1\n e_x = temp1 / 
np.linalg.norm(temp1)\n temp2 = P3 - P1\n i = np.dot(e_x, temp2)\n temp3 = temp2 - i * e_x\n e_y = temp3 / np.linalg.norm(temp3)\n e_z = np.cross(e_x, e_y)\n d = np.linalg.norm(P2 - P1)\n j = np.dot(e_y, temp2) \n x = d / 2\n y = (-2*i*x + i*i + j*j) / (2*j)\n temp4 = radius**2 - x*x - y*y\n if temp4 < 0:\n return False\n return True", "def isoutside(coords, shape):\n # Label external pores for trimming below\n if len(shape) == 1: # Spherical\n # Find external points\n r = np.sqrt(np.sum(coords**2, axis=1))\n Ps = r > shape[0]\n elif len(shape) == 2: # Cylindrical\n # Find external pores outside radius\n r = np.sqrt(np.sum(coords[:, [0, 1]]**2, axis=1))\n Ps = r > shape[0]\n # Find external pores above and below cylinder\n if shape[1] > 0:\n Ps = Ps + (coords[:, 2] > shape[1])\n Ps = Ps + (coords[:, 2] < 0)\n else:\n pass\n elif len(shape) == 3: # Rectilinear\n shape = np.array(shape, dtype=float)\n try:\n lo_lim = shape[:, 0]\n hi_lim = shape[:, 1]\n except IndexError:\n lo_lim = np.array([0, 0, 0])\n hi_lim = shape\n Ps1 = np.any(coords > hi_lim, axis=1)\n Ps2 = np.any(coords < lo_lim, axis=1)\n Ps = Ps1 + Ps2\n return Ps", "def sphere_isclose(c1, c2, *args, **kwargs):\n return np.isclose(c1.radius, c2.radius, *args, **kwargs) and np.allclose(\n c1.center, c2.center, *args, **kwargs\n )", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def get_radius(self):", "def incircle(self,xpos,ypos,cellx,celly):\n xcell, ycell = self.getcellcenter(cellx,celly)\n if ((xpos - xcell)**2 + (ypos - ycell)**2) < self.crad2:\n return True\n return False\n\n return cellx, celly", "def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval", "def is_point_inside_hypermoon(point: np.array, c: Tuple[List[float]], r: Tuple[float]) -> bool:\n return is_point_inside_hypersphere(point, c[0], r[0]) and not is_point_inside_hypersphere(point, c[1], r[1])", "def __contains__(self, point):\n #### Original \n from pyresample.spherical_geometry import point_inside, Coordinate\n corners = self.corners\n\n if isinstance(point, tuple):\n return point_inside(Coordinate(*point), corners)\n else:\n return point_inside(point, corners)\n #### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in __contains__')\n #sphpoly = SphPolygon(corners)\n #return sphpoly.intersection(SphPolygon(point), sphpoly)", "def containsPos(self, aerial_pos):\n # Check altitude of position\n aerial_alt = aerial_pos.altitude_msl\n if (aerial_alt < 0 or aerial_alt > self.cylinder_height):\n return False\n # Check lat/lon of position\n dist_to_center = self.gps_position.distanceTo(aerial_pos.gps_position)\n if dist_to_center > self.cylinder_radius:\n return False\n # Both within altitude and radius bounds, inside cylinder\n return True", "def within_radius(self, radius=5.0):\n\n return GeoEntry.within_radius(self.primary_geocode, radius, ['all',])", "def is_clicked(vtx_x, vtx_y, mouse_x, mouse_y, radius):\n return math.sqrt(((mouse_x - vtx_x) ** 2) + ((mouse_y - vtx_y) ** 2)) < radius", "def HasPoint(self, vtkAMRBox, , , p_float_6, p_float_7, p_float_8):\n ..." ]
[ "0.7254775", "0.61709523", "0.5765121", "0.57588995", "0.5667875", "0.5657872", "0.5615331", "0.5498272", "0.54953516", "0.5392794", "0.5387623", "0.5384196", "0.53837055", "0.5313938", "0.52996296", "0.5263196", "0.5251948", "0.5240129", "0.5167428", "0.51551384", "0.5082203", "0.5078475", "0.5062163", "0.5049635", "0.50441986", "0.50374484", "0.5028136", "0.50127864", "0.5002548", "0.49691728" ]
0.78412956
0
Create a copy of a facemap proc file that points to a new video. By default, the new proc file is created in the same folder as the new video file and named videofile_proc.npy.
def copy_facemap_roi(procfile, videofile, outputfile=None): videodata = np.load(procfile, allow_pickle=True).item() videodata['filenames'] = [[videofile]] if outputfile is None: outputfile = os.path.splitext(videofile)[0]+'_proc.npy' if os.path.isfile(outputfile): print(f'File {outputfile} exists. It will not be overwritten.') return None np.save(outputfile, videodata) return outputfile
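A minimal usage sketch for copy_facemap_roi, assuming hypothetical file paths for an existing proc file and a second video of the same rig:

# Sketch only: both paths below are invented for illustration.
new_proc = copy_facemap_roi('/data/session01/cam1_proc.npy', '/data/session02/cam1.mp4')
if new_proc is not None:
    print(f'New proc file written to {new_proc}')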
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_video(input_file, output_file):\n input_video = VideoFileClip(input_file)\n output_video = input_video.fl_image(detect_lane.fit_and_plot)\n output_video.write_videofile(output_file, audio=False)", "def process_video(lane, fname, output):\n\tclip = VideoFileClip(fname)\n\toutput_name = output\n\toutput_clip = clip.fl_image(lane.pipeline)\n\toutput_clip.write_videofile(output_name, audio=False)\n\tprint ('Video processed successfully')", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # 
Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def process_video(self, tmp_output_folder, video_name, video_num, total_videos):\n vidcap = cv2.VideoCapture(join(tmp_output_folder, video_name))\n print(f\"Processing video {video_num}/{total_videos} with name {video_name} \\n\")\n\n input_length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))\n frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vidcap.get(cv2.CAP_PROP_FPS))\n\n metadata = []\n faces_all_frames = []\n success, image = vidcap.read()\n count = 0\n frame = 0\n while success:\n if count % self.sample_every == 0:\n height, width = image.shape[:2]\n image = cv2.resize(image, (self.width, self.height), interpolation=cv2.INTER_CUBIC)\n\n # Convert from BGR color (OpenCV) to RGB color (face_recognition)\n rgb_image = image[:, :, ::-1]\n\n # Find all the faces in the current frame of video\n face_locations = face_recognition.face_locations(rgb_image)\n faces = []\n face_num = 0\n # Display the results\n for top, right, bottom, left in face_locations:\n # Draw a box around the face\n faces.append(image[top:bottom, left:right, :].copy())\n metadata.append(\n f\"{video_name},frame-{count}.face-{face_num}.jpg,{count},{face_num},{input_length},{fps},{frame_width},{frame_height},{top},{right},{bottom},{left}\\n\")\n face_num += 1\n faces_all_frames.append(faces)\n\n frame += 1\n success, image = vidcap.read()\n count += 1\n video_num += 1\n vidcap.release()\n\n with open(f\"{self.output_folder}/faces-pickle/{video_name}.pkl\", \"wb\") as f_out:\n pickle.dump(faces_all_frames, f_out)\n return metadata", "def process_video(input_file, output_file):\n with open('all-features-rbf-svm.p', 'rb') as svm_fd:\n clf = pickle.load(svm_fd)\n with open('all-features-scaler.p', 'rb') as scaler_fd:\n hog_scaler = pickle.load(scaler_fd)\n hog_parameters = HogParameters(orientations=18, pixels_per_cell=8, cells_per_block=2)\n clip = VideoFileClip(input_file)\n test_clip = clip.fl_image(\n lambda frame: process_frame(frame, clf=clf, norm_scaler=hog_scaler, hog_parameters=hog_parameters, spatial_size=(16, 16), hist_bins=32))\n test_clip.write_videofile(output_file, audio=False)", "def convert_video(video_file, output_file_name):\n video_stream = cv2.VideoCapture(video_file)\n total_frames = video_stream.get(cv2.CAP_PROP_FRAME_COUNT)\n background = get_median_frame(video_stream)\n video_stream.release()\n #reopen for processing:\n video_stream = cv2.VideoCapture(video_file)\n #ready an output writer\n writer = cv2.VideoWriter(output_file_name, \n cv2.VideoWriter_fourcc(*\"MP4V\"), fps,(1080,1920)) #(1920,1080))\n frameCnt=0\n pos = [] #Array for the coordinates\n while(frameCnt < total_frames-1):\n frameCnt+=1\n ret, frame = video_stream.read()\n dframe = background_subtraction(frame,background)\n cnts = find_contours(dframe)\n x,y = find_lowest_contour(cnts)\n pos.append([x,y])\n if 
len(pos): \n cv2.polylines(frame,np.int32([pos]),False,(0, 255, 0),2)\n writer.write(cv2.resize(frame, (1080,1920))) ## size probably shoudn't be fixed.\n writer.release()\n video_stream.release()\n return pos", "def create_movie(name, folder):\n cmd = [\"ffmpeg\", \"-framerate\", \"1\", \"-i\", folder + \"/pic%04d.png\", \"-c:v\",\n \"libx264\", \"-r\", \"30\", \"-pix_fmt\", \"yuv420p\", name]\n return subprocess.call(cmd)", "def local_video(**kwargs):\n output_dir = run_video_preprocess(\n video_file=input_video,\n roi_locations=kwargs[\"roi_locations\"],\n preprocess_analysis=kwargs[\"preprocess_analysis\"],\n database=False\n )\n\n run_analysis_pipeline(\n preprocess_analysis=kwargs[\"preprocess_analysis\"],\n json_filepath=output_dir,\n )", "def make_video(queue, video_file, destination, face_locations, face_encodings, match_encodings, settings):\n trackers = [] # list of tracker objects, one for each matched face\n # get video\n video = cv2.VideoCapture(video_file) # input VideoCapture object\n frame_rate = video.get(cv2.CAP_PROP_FPS) # frames per second in input video\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) # width of input video frame\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) # height of input video frame\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) # number of frames in input video\n # get settings\n track_period = settings[\"track_period\"] # track period from settings\n tolerance = settings[\"tolerance\"] # face matching tolerance from settings\n blur_method = settings[\"blur_method\"] # type of blurring from settings\n blur_intensity = settings[\"blur_intensity\"] # blurring filter size from settings\n display_output = settings[\"display_output\"] # flag indicating whether to display output video from settings\n # initialize writer\n out = video_utils.initialize_writer(destination, (width, height), frame_rate) # VideoWriter object\n for i in range(frame_count):\n ret, img = video.read() # ret indicates if frame was read correctly, img is last read frame\n if i % track_period == 0: # frame for detection\n current_frame_encodings = np.array(face_encodings[i // track_period]) # array of encodings for faces in current frame\n matched_indices, matched_encodings = recognition.match_faces(current_frame_encodings, np.array(match_encodings), tolerance) # indices of matched faces from current frame and their encodings\n matched_locations = [face_locations[i // track_period][k] for k in matched_indices] # locations of matched faces from current frame\n trackers = tracking.start_trackers(img, matched_locations) # list of tracker objects, one for each matched face\n else: # frame for tracking\n matched_locations = tracking.update_locations(trackers, img) # updated locations of matched faces from current frame\n # generate blurred image\n blurred = None # object holding image with blurred faces\n if blur_method == \"pixelate\":\n blurred = blur_methods.pixelated(img, matched_locations, blur_intensity)\n elif blur_method == \"blur\":\n blurred = blur_methods.blurred(img, matched_locations, blur_intensity)\n elif blur_method == \"blacken\":\n blurred = blur_methods.blackened(img, matched_locations)\n out.write(blurred)\n\n out.release()\n queue.put(0)\n if display_output:\n video_utils.display_video(destination)", "def process_video(self, input_path, output_path, debug=False):\n clip = VideoFileClip(input_path)\n if debug:\n test_clip = clip.fl_image(self.process_image_debug)\n else:\n test_clip = clip.fl_image(self.process_image)\n 
test_clip.write_videofile(output_path)", "def preprocess_sample(file, params):\n\n videoFile = file + \".mp4\"\n audioFile = file + \".wav\"\n roiFile = file + \".png\"\n visualFeaturesFile = file + \".npy\"\n\n roiSize = params[\"roiSize\"]\n normMean = params[\"normMean\"]\n normStd = params[\"normStd\"]\n vf = params[\"vf\"]\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n #Extract the audio from the video file using the FFmpeg utility and save it to a wav file.\n v2aCommand = \"ffmpeg -y -v quiet -i \" + videoFile + \" -ac 1 -ar 16000 -vn \" + audioFile\n os.system(v2aCommand)\n\n\n #for each frame, resize to 224x224 and crop the central 112x112 region\n captureObj = cv.VideoCapture(videoFile)\n roiSequence = list()\n while (captureObj.isOpened()):\n ret, frame = captureObj.read()\n if ret == True:\n grayed = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n grayed = grayed/255\n grayed = cv.resize(grayed, (224,224))\n roi = grayed[int(112-(roiSize/2)):int(112+(roiSize/2)), int(112-(roiSize/2)):int(112+(roiSize/2))]\n roiSequence.append(roi)\n else:\n break\n captureObj.release()\n cv.imwrite(roiFile, np.floor(255*np.concatenate(roiSequence, axis=1)).astype(np.int))\n\n\n #normalise the frames and extract features for each frame using the visual frontend\n #save the visual features to a .npy file\n inp = np.stack(roiSequence, axis=0)\n inp = np.expand_dims(inp, axis=[1,2])\n inp = (inp - normMean)/normStd\n inputBatch = torch.from_numpy(inp)\n inputBatch = (inputBatch.float()).to(device)\n vf.eval()\n with torch.no_grad():\n outputBatch = vf(inputBatch)\n out = torch.squeeze(outputBatch, dim=1)\n out = out.cpu().numpy()\n np.save(visualFeaturesFile, out)\n return", "def analyze_video(vidNum_iter, config, pointInds_toUse, pts_spaced, session): # function needed for multiprocessing\n\n optic = config['Optic']\n\n numVids = session['num_vids']\n path_vid_allFiles = session['videos']\n lk_names = [key for key in optic.keys() if 'lk_' in key]\n lk_params = {k.split('lk_')[1]: (tuple(optic[k]) if type(optic[k]) is list else optic[k]) \\\n for k in lk_names}\n\n vid = imageio.get_reader(path_vid_allFiles[vidNum_iter], 'ffmpeg')\n # metadata = vid.get_meta_data()\n\n path_vid = path_vid_allFiles[vidNum_iter] # get path of the current vid\n video = cv2.VideoCapture(path_vid) # open the video object with openCV\n numFrames = int(video.get(\n cv2.CAP_PROP_FRAME_COUNT)) # get frame count of this vid GENERALLY INACCURATE. OFF BY AROUND -25 frames\n\n frameToSet = 0\n frame = vid.get_data(\n frameToSet) # Get a single frame to use as the first 'previous frame' in calculating optic flow\n new_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n old_frame = new_frame_gray\n\n displacements_tmp = np.zeros((pts_spaced.shape[0], 2, np.uint64(numFrames + (numVids * 1000)))) * np.nan\n\n print(' ', end='', flush=True)\n text = \"progresser #{}\".format(vidNum_iter)\n print(f'\\n Calculating displacement field: video # {vidNum_iter + 1}/{numVids}')\n\n for iter_frame, new_frame in enumerate(tqdm(vid, total=numFrames, desc=text, position=vidNum_iter)):\n new_frame_gray = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n ##calculate optical flow\n pointInds_new, status, error = cv2.calcOpticalFlowPyrLK(old_frame, new_frame_gray, pointInds_toUse, None,\n **lk_params) # Calculate displacement distance between STATIC/ANCHORED points and the calculated new points. Also note the excluded 'NextPts' parameter. 
Could be used for fancier tracking\n\n ## Calculate displacement and place into variable 'displacements' (changes in size every iter)\n if iter_frame == 0:\n displacements_tmp[:, :, iter_frame] = np.zeros((pts_spaced.shape[0], 2))\n else:\n displacements_tmp[:, :, iter_frame] = np.single(np.squeeze((\n pointInds_new - pointInds_toUse))) # this is the important variable. Simply the difference in the estimate\n\n old_frame = new_frame_gray # make current frame the 'old_frame' for the next iteration\n\n return displacements_tmp", "def run_video(self, video_path):\n file, ext = os.path.splitext(video_path)\n video_name = file.split('/')[-1]\n out_filename = video_name + '_out' + '.avi'\n\n cap = cv2.VideoCapture(video_path)\n wi = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n he = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(wi, he)\n\n vwriter = cv2.VideoWriter(out_filename, cv2.VideoWriter_fourcc(*'MJPG'), 10, (wi, he))\n counter = 0\n fac = 2\n start = time.time()\n while True:\n ret, image = cap.read()\n\n if ret:\n counter += 1\n\n ## resize image\n\n height, width, channels = image.shape\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n resized_image = cv2.resize(image, target_size, interpolation=cv2.INTER_AREA)\n output = resized_image.copy()\n\n ## get segmentation map\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n ## visualize\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n\n ## overlay on image\n alpha = 0.7\n cv2.addWeighted(seg_image, alpha, output, 1 - alpha, 0, output)\n\n output = cv2.resize(output, (wi, he), interpolation=cv2.INTER_AREA)\n # outimg = 'image_' + str(counter) + '.jpg'\n # cv2.imwrite(os.path.join(os.getcwd(), 'test_out', outimg),output)\n vwriter.write(output)\n else:\n break\n\n end = time.time()\n print(\"Frames and Time Taken: \", counter, end - start)\n cap.release()\n vwriter.release()", "def process_video(filename, args, cfg, net):\n # Split video into frames\n images = split_video(filename)\n # Set output dir\n output_dir = args.output\n # Add brackets and extension to filename\n output_path = create_video_output_path(output_dir, cfg)\n # Get height and width of 1st image\n height, width, _ = check_img_size(images[0]).shape\n # Create VideoWriter object\n video = cv2.VideoWriter(output_path, \n cv2.VideoWriter_fourcc(*'FMP4'), \n cfg['video']['fps'], \n (width, height))\n for image in images:\n # Process frames\n img_steps = process_image(image, cfg, net)\n # Check for --show-detections flag\n output_img = check_if_adding_bboxes(args, img_steps) \n # Write to video\n video.write(output_img) \n # Release video writer object\n video.release()", "def video(perspective_matrix_path, source=\"cam\", save=False, save_path=None, file_name=\"out\", cam_cal=None):\n if not os.path.isfile(perspective_matrix_path):\n raise FileNotFoundError(\"Path to perspective matrix file not exist!\")\n\n with open(perspective_matrix_path, \"rb\") as p:\n perspective_matrix = pickle.load(p)\n M = perspective_matrix[\"M\"]\n Minv = perspective_matrix[\"Minv\"]\n\n if source == \"cam\":\n cap = cv2.VideoCapture(0)\n else:\n if not os.path.isfile(source):\n raise FileNotFoundError(source, \" not Exist!\")\n cap = cv2.VideoCapture(source)\n\n # camera calibration parameters [ mtx , dist]\n mtx = None\n dist = None\n\n out = None\n 
if save:\n if not os.path.isdir(save_path):\n raise FileNotFoundError(save_path, \" Not Exist!\")\n file_name += \".mp4\"\n out = cv2.VideoWriter(save_path + file_name, -1, 20, (int(cap.get(3)), int(cap.get(4))))\n\n if cam_cal:\n if not os.path.isfile(cam_cal):\n raise FileNotFoundError(cam_cal, \" Not Exist!\")\n\n with open(cam_cal, \"rb\") as p:\n calibration = pickle.load(p)\n mtx = calibration[\"mtx\"]\n dist = calibration[\"dist\"]\n\n left_line = Line(5)\n right_line = Line(5)\n\n while True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n if not ret:\n print(\"Finished..\")\n sys.exit(0)\n\n # cv2 read frame as BGR, convert it to RGB\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # camera calibration\n if not (mtx is None or dist is None):\n frame = cv2.undistort(frame, mtx, dist, None, mtx)\n\n # get edges in image\n edges = apply_edge_detection(frame)\n\n # transform image to bird view\n warped = warped_img(edges, M)\n\n # init out image which will draw lane line on it then weight it with original frame\n out_img = np.zeros_like(warped)\n if len(warped.shape) == 3 and warped.shape[2] == 3:\n pass\n else:\n out_img = np.dstack((out_img, out_img, out_img))\n\n # if line not detected, apply sliding window\n if not left_line.detected or not right_line.detected:\n leftx, lefty, rightx, righty = sliding_window(warped, 9, 200)\n\n # if already detected apply search around detected line\n else:\n leftx, lefty = search_around_poly(left_line, warped)\n rightx, righty = search_around_poly(right_line, warped)\n\n # will used for plotting line, find x fitted\n ploty = np.linspace(warped.shape[0] // 4, warped.shape[0] - 1, warped.shape[0])\n\n # check if at least 100 pixels detected as line\n if len(leftx) > 100 and len(rightx) > 100:\n\n # make detected flag true\n left_line.detected = True\n right_line.detected = True\n\n left_line.current_x = leftx\n left_line.current_y = lefty\n\n right_line.current_x = rightx\n right_line.current_y = righty\n\n left_line.fit_polynomial(ploty)\n right_line.fit_polynomial(ploty)\n\n else:\n print(\"Line not detected in this frame \")\n # we just draw line form previous frame\n\n # make detected flag true\n left_line.detected = False\n right_line.detected = False\n\n # update Lane line radius\n left_line.radius()\n right_line.radius()\n\n # avg radius of to lines, and plot it\n radius = (left_line.radius_of_curvature + right_line.radius_of_curvature) // 2\n frame = write_text(frame, \"Radius of Curvature = \" + str(radius) + \" M\", pos=(20, 50))\n\n # calculate Alignment ( how much car away from center between Lane lines\n dir = \"Left\" # car far from left or right\n\n left_line.car_offset(frame.shape) # distance from left line\n right_line.car_offset(frame.shape) # distance from right line\n\n distance = round(right_line.line_base_pos - left_line.line_base_pos, 2)\n\n if distance < 0: # car far away from left line not right line\n distance = -distance\n dir = \"Right\"\n frame = write_text(frame, \"Vehicle is {}m {} of center\".format(distance, dir), pos=(20, 80))\n\n # ** plot lane lines on image **\n # left_line.draw_line(out_img, ploty)\n # right_line.draw_line(out_img, ploty)\n\n # color pixel which belong to lane lines\n left_line.color_pixel(out_img, (255, 0, 0))\n right_line.color_pixel(out_img, (255, 100, 0))\n\n # fit green triangle in area between lane lines\n pts_left = np.array([np.transpose(np.vstack([left_line.bestx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_line.bestx, ploty])))])\n pts = 
np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(out_img, np.int_([pts]), (0, 255, 0))\n\n # return image to normal view from bird view\n out_img_undit = warped_img(out_img, Minv)\n\n # weight out_image_undit with original frame\n frame = cv2.addWeighted(out_img_undit, 0.5, frame, 1, 0)\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n cv2.imshow(\"frame\", frame)\n\n # write video\n if save:\n out.write(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()", "def emv(inputVideoPath, outputVideoPath, maxLevel, freqLow, freqHigh, alpha, chromAttenuation, startFrameNumber, endFrameNumber, lambdaC=-1, app=\"color\", method=\"ideal\", roi=None): \n fps, frames = getVideoFrames(inputVideoPath, startFrameNumber, endFrameNumber)\n if app==\"color\":\n recreateFrames=emvCoreColor(frames, fps, maxLevel, freqLow, freqHigh, alpha, chromAttenuation, method)\n elif app==\"motion\":\n recreateFrames=emvCoreMotion(frames, fps, maxLevel, freqLow, freqHigh, alpha, lambdaC, chromAttenuation, method)\n saveFramesToVideoROI(frames, recreateFrames, outputVideoPath, roi)\n return", "def displacements_monothread(config, pointInds_toUse, pointInds_tracked, pointInds_tracked_tuple, displacements,\n pts_spaced, color_tuples, session):\n\n ## Main loop to pull out displacements in each video\n ind_concat = 0\n fps = 0\n tic_fps = time.time()\n tic_all = time.time()\n\n optic = config['Optic']\n video = config['Video']\n\n vidNums_toUse = optic['vidNums_toUse']\n showVideo_pref = optic['showVideo_pref']\n fps_counterPeriod = video['fps_counterPeriod']\n printFPS_pref = video['printFPS_pref']\n remote = config['General']['remote']\n save_vid = video['save_demo']\n\n Fs = video['Fs']\n vid_width = video['width']\n vid_height = video['height']\n test_len = video['demo_len']\n save_pathFull = str(Path(config['Paths']['viz']) / 'optic_test.avi')\n\n numVids = session['num_vids']\n path_vid_allFiles = session['videos']\n lk_names = [key for key in optic.keys() if 'lk_' in key]\n lk_params = {k.split('lk_')[1]: (tuple(optic[k]) if type(optic[k]) is list else optic[k]) \\\n for k in lk_names}\n\n # Define the codec and create VideoWriter object\n if showVideo_pref and (save_vid or remote):\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n print(f'saving to file {save_pathFull}')\n out = cv2.VideoWriter(save_pathFull, fourcc, Fs, (np.int64(vid_width), np.int64(vid_height)))\n else:\n out = None\n vid_lens = []\n for vidNum_iter in vidNums_toUse:\n vid = imageio.get_reader(path_vid_allFiles[vidNum_iter], 'ffmpeg')\n # metadata = vid.get_meta_data()\n\n path_vid = path_vid_allFiles[vidNum_iter] # get path of the current vid\n video = cv2.VideoCapture(path_vid) # open the video object with openCV\n numFrames_rough = int(video.get(\n cv2.CAP_PROP_FRAME_COUNT)) # get frame count of this vid GENERALLY INACCURATE. 
OFF BY AROUND -25 frames\n\n frameToSet = 0\n frame = vid.get_data(0) # Get a single frame to use as the first 'previous frame' in calculating optic flow\n new_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n old_frame = new_frame_gray\n\n print(f'\\n Calculating displacement field: video # {vidNum_iter + 1}/{numVids}')\n # while True:\n for iter_frame, new_frame in enumerate(tqdm(vid, total=numFrames_rough)):\n new_frame_gray = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n ##calculate optical flow\n pointInds_new, status, error = cv2.calcOpticalFlowPyrLK(old_frame, new_frame_gray, pointInds_toUse, None,\n **lk_params) # Calculate displacement distance between STATIC/ANCHORED points and the calculated new points. Also note the excluded 'NextPts' parameter. Could be used for fancier tracking\n\n ## Calculate displacement and place into variable 'displacements' (changes in size every iter) \n if iter_frame == 0:\n displacements[:, :, ind_concat] = np.zeros((pts_spaced.shape[0], 2))\n else:\n displacements[:, :, ind_concat] = np.single(np.squeeze((\n pointInds_new - pointInds_toUse))) # this is the important variable. Simply the difference in the estimate\n\n old_frame = new_frame_gray # make current frame the 'old_frame' for the next iteration\n\n ## below is just for visualization. Nothing calculated is maintained\n if showVideo_pref:\n pointInds_tracked = pointInds_tracked + (\n pointInds_new - pointInds_toUse) # calculate integrated position\n pointInds_tracked = pointInds_tracked - (\n pointInds_tracked - pointInds_toUse) * 0.01 # multiplied constant is the relaxation term. this is just for display purposes. Relaxation term chosen during cleanup will be real\n pointInds = [pointInds_tracked, pointInds_tracked_tuple]\n counters = [iter_frame, vidNum_iter, ind_concat, fps]\n if (remote and iter_frame < test_len) or not remote:\n videos.visualize_progress(config, session, new_frame, pointInds, color_tuples, counters, out)\n\n if (save_vid or remote) and iter_frame == test_len:\n out.release()\n\n k = cv2.waitKey(1) & 0xff\n if k == 27: break\n\n ind_concat = ind_concat + 1\n\n if ind_concat % fps_counterPeriod == 0:\n elapsed = time.time() - tic_fps\n fps = fps_counterPeriod / elapsed\n if printFPS_pref:\n print(fps)\n tic_fps = time.time()\n vid_lens.append(iter_frame+1)\n ## Calculate how long calculation took\n elapsed = time.time() - tic_all\n helpers.print_time('video time elapsed:', elapsed)\n print(f'Capture rate: {round(ind_concat / elapsed, 3)} fps')\n\n numFrames_total = ind_concat - 1\n cv2.destroyAllWindows()\n\n displacements = displacements[:, :, ~np.isnan(displacements[0, 0, :])]\n\n return displacements, numFrames_total, vid_lens", "def write_face_samples(model, output_path, invid):\n \n if not os.path.isdir(output_path) :\n os.mkdir(output_path)\n \n video = mmcv.VideoReader(invid)\n for frame_ix, frame in enumerate(video):\n frame_name = f\"{output_path}webcam_{frame_ix}_0.jpg\"\n if os.path.isfile(frame_name): continue\n \n frame_img = PIL.Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n _ = model(frame_img,frame_name)", "def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")", "def __call__(self, video_path, per_frames = 1 , offset = None):\n \n cap = cv2.VideoCapture(video_path)\n \n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n\n \n if offset:\n cap.set(cv2.CAP_PROP_POS_MSEC, offset)\n \n \n info = []\n\n while cap.isOpened():\n 
ret, frame = cap.read()\n if ret:\n if cap.get(cv2.CAP_PROP_POS_FRAMES) % per_frames == 0:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n face_img = image.img_to_array(element[1])\n\n face_img = utils.preprocess_input(face_img, version=1)\n face_img = np.expand_dims(face_img, axis=0)\n\n features = self.vgg_feature_extractor.predict(face_img)\n label = self.gender_svm.predict(features)[0]\n decision_value = round(self.gender_svm.decision_function(features)[0], 3)\n\n bounding_box = element[0][0]\n detection_score = round(element[5], 3)\n bbox_length = bounding_box.bottom() - bounding_box.top()\n\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), bounding_box, (bbox_length, bbox_length), label,\n decision_value, detection_score\n ])\n\n else:\n break\n cap.release()\n info = pd.DataFrame.from_records(info, columns = ['frame', 'bb', 'size','label', 'decision', 'conf'])\n return info", "def run(self):\n\n for file_cnt, file_path in enumerate(self.files_found):\n video_timer = SimbaTimer()\n video_timer.start_timer()\n _, self.video_name, _ = get_fn_ext(file_path)\n self.video_info, self.px_per_mm, self.fps = self.read_video_info(\n video_name=self.video_name\n )\n self.width, self.height = int(\n self.video_info[\"Resolution_width\"].values[0]\n ), int(self.video_info[\"Resolution_height\"].values[0])\n if self.video_setting:\n self.fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)\n self.video_save_path = os.path.join(\n self.heatmap_clf_location_dir, self.video_name + \".mp4\"\n )\n self.writer = cv2.VideoWriter(\n self.video_save_path,\n self.fourcc,\n self.fps,\n (self.width, self.height),\n )\n if self.frame_setting:\n self.save_video_folder = os.path.join(\n self.heatmap_clf_location_dir, self.video_name\n )\n if not os.path.exists(self.save_video_folder):\n os.makedirs(self.save_video_folder)\n self.data_df = read_df(file_path=file_path, file_type=self.file_type)\n clf_array, aspect_ratio = self.__calculate_bin_attr(\n data_df=self.data_df,\n clf_name=self.clf_name,\n bp_lst=self.bp_lst,\n px_per_mm=self.px_per_mm,\n img_width=self.width,\n img_height=self.height,\n bin_size=self.bin_size,\n fps=self.fps,\n )\n\n if self.max_scale == \"auto\":\n self.max_scale = self.__calculate_max_scale(clf_array=clf_array)\n if self.max_scale == 0:\n self.max_scale = 1\n\n if self.final_img_setting:\n self.make_clf_heatmap_plot(\n frm_data=clf_array[-1, :, :],\n max_scale=self.max_scale,\n palette=self.palette,\n aspect_ratio=aspect_ratio,\n file_name=os.path.join(\n self.heatmap_clf_location_dir,\n self.video_name + \"_final_frm.png\",\n ),\n shading=self.shading,\n clf_name=self.clf_name,\n img_size=(self.width, self.height),\n final_img=True,\n )\n\n if self.video_setting or self.frame_setting:\n for frm_cnt, cumulative_frm_idx in enumerate(range(clf_array.shape[0])):\n frm_data = clf_array[cumulative_frm_idx, :, :]\n cum_df = pd.DataFrame(frm_data).reset_index()\n cum_df = cum_df.melt(\n id_vars=\"index\",\n value_vars=None,\n var_name=None,\n value_name=\"seconds\",\n col_level=None,\n ).rename(\n columns={\"index\": \"vertical_idx\", \"variable\": \"horizontal_idx\"}\n )\n cum_df[\"color\"] = (\n (cum_df[\"seconds\"].astype(float) / float(self.max_scale))\n .round(2)\n .clip(upper=100)\n )\n color_array = np.zeros(\n (\n len(cum_df[\"vertical_idx\"].unique()),\n len(cum_df[\"horizontal_idx\"].unique()),\n )\n )\n for i in 
range(color_array.shape[0]):\n for j in range(color_array.shape[1]):\n value = cum_df[\"color\"][\n (cum_df[\"horizontal_idx\"] == j)\n & (cum_df[\"vertical_idx\"] == i)\n ].values[0]\n color_array[i, j] = value\n\n fig = plt.figure()\n im_ratio = color_array.shape[0] / color_array.shape[1]\n plt.pcolormesh(\n color_array,\n shading=self.shading,\n cmap=self.palette,\n rasterized=True,\n alpha=1,\n vmin=0.0,\n vmax=float(self.max_scale),\n )\n plt.gca().invert_yaxis()\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n plt.tick_params(axis=\"both\", which=\"both\", length=0)\n cb = plt.colorbar(pad=0.0, fraction=0.023 * im_ratio)\n cb.ax.tick_params(size=0)\n cb.outline.set_visible(False)\n cb.set_label(\n \"{} (seconds)\".format(self.clf_name), rotation=270, labelpad=10\n )\n plt.tight_layout()\n plt.gca().set_aspect(aspect_ratio)\n canvas = FigureCanvas(fig)\n canvas.draw()\n mat = np.array(canvas.renderer._renderer)\n image = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)\n image = cv2.resize(image, (self.width, self.height))\n image = np.uint8(image)\n plt.close()\n\n if self.video_setting:\n self.writer.write(image)\n if self.frame_setting:\n frame_save_path = os.path.join(\n self.save_video_folder, str(frm_cnt) + \".png\"\n )\n cv2.imwrite(frame_save_path, image)\n print(\n \"Created heatmap frame: {} / {}. Video: {} ({}/{})\".format(\n str(frm_cnt + 1),\n str(len(self.data_df)),\n self.video_name,\n str(file_cnt + 1),\n len(self.files_found),\n )\n )\n\n if self.video_setting:\n self.writer.release()\n\n video_timer.stop_timer()\n print(\n \"Heatmap plot for video {} saved (elapsed time: {}s) ... \".format(\n self.video_name, video_timer.elapsed_time_str\n )\n )\n\n self.timer.stop_timer()\n stdout_success(\n msg=\"All heatmap visualizations created in project_folder/frames/output/heatmaps_classifier_locations directory\",\n elapsed_time=\"self.timer.elapsed_time_str\",\n )", "def _spawn_ffmpeg(self) -> None:\r\n if self.ffmpeg_proc is not None:\r\n raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '\r\n + f'{self.ffmpeg_proc} (not None)')\r\n\r\n args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',\r\n '-s', f'{self.frame_size[0]}x{self.frame_size[1]}',\r\n '-pix_fmt', 'rgba', '-r', str(self.fps),\r\n '-loglevel', 'quiet',\r\n '-i', 'pipe:0',\r\n '-vcodec', 'h264', '-pix_fmt', 'yuv420p',\r\n '-movflags', '+faststart']\r\n\r\n if self.bitrate > 0:\r\n args.extend(['-b', f'{self.bitrate}k'])\r\n args.extend(['-y', self.outfile])\r\n\r\n create_flags = sp.CREATE_NO_WINDOW if 'nt' in os.name else 0\r\n self.ffmpeg_proc = sp.Popen(args, shell=False, stdout=None, stderr=None,\r\n stdin=sp.PIPE, creationflags=create_flags)", "def write_video(project_video_output, output_folder, fps=20):\n print(\"Creating video {}, FPS={}\".format(project_video_output, fps))\n clip = ImageSequenceClip(output_folder, fps)\n clip.write_videofile(project_video_output)", "def process_video(path, method):\n # TODO: Obtener nombre de video a partir del path\n video_name = ''\n # TODO: Crear carpeta si es que no existe (Deteccion/{method}/{video_name})\n folder_path = f\"Deteccion/{method}/{video_name}\"\n try:\n cap = cv2.VideoCapture(path)\n # Check if camera opened successfully\n if (cap.isOpened() is False):\n print(\"Error opening video stream or file\")\n\n frame_counter = 0\n # Read until video is completed\n while(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret is True:\n # TODO: Crear carpeta del frame si es que no existe 
(Deteccion/{method}/{video_name}/{frame_name})\n frame_name = f\"frame_{frame_counter}\"\n\n faces = process_frame(frame, method)\n # TODO: Guardar bounding boxes\n np.save(f\"{folder_path}/{frame_name}/bounding_boxes.npy\", faces)\n\n # TODO: Guardar imagenes recortadas\n for bb in faces:\n pass\n frame_counter += 1\n\n # Break the loop\n else:\n break\n\n finally:\n # When everything done, release the video capture object\n cap.release()", "def mapBackToSurface(array,filename):\n #### Map back to surface\n if array.shape[0]==360:\n out_array = np.zeros((glasser2.shape[0],3))\n\n roicount = 0\n for roi in range(360):\n for col in range(array.shape[1]):\n vertex_ind = np.where(glasser2==roi+1)[0]\n out_array[vertex_ind,col] = array[roicount,col]\n\n roicount += 1\n\n else:\n out_array = array\n\n #### \n # Write file to csv and run wb_command\n np.savetxt(filename + '.csv', out_array,fmt='%s')\n wb_file = filename + '.dscalar.nii'\n wb_command = 'wb_command -cifti-convert -from-text ' + filename + '.csv ' + glasserfile2 + ' ' + wb_file + ' -reset-scalars'\n os.system(wb_command)\n os.remove(filename + '.csv')", "def tagVideo(modelpath, videopath, outputPath=None): \n model = get_model_instance_segmentation(3)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # model.load_state_dict(torch.load(modelpath, map_location=device), strict=False)\n model.load_state_dict(torch.load(modelpath, map_location=device))\n model = model.to(device)\n model.eval()\n\n \n data_transform = transforms.Compose([\n ToPILImage(),\n transforms.ToTensor(), \n ])\n\n\n if outputPath:\n writer = FFmpegWriter(str(outputPath))\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.namedWindow('main', cv2.WINDOW_NORMAL)\n labels = ['No mask', 'Mask']\n labelColor = [(10, 0, 255), (10, 255, 0)]\n img_count = 0\n outputDir = os.path.dirname(os.path.realpath(outputPath))\n frame_count = 0\n boundingBoxes = []\n for frame in vreader(str(videopath)):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n print('Frame:', frame_count)\n\n if frame_count%30==0:\n frameTensor = data_transform(frame)\n frameTensor = torch.unsqueeze(frameTensor, 0).to(device)\n output = model(frameTensor)\n boundingBoxes = plot_image_new(frame, frameTensor[0], output[0]) \n \n if len(boundingBoxes)>0:\n for bb in boundingBoxes:\n cv2.rectangle(frame,\n (bb[0], bb[1]),\n (bb[2], bb[3]),\n (54, 66, 227),\n thickness=2)\n\n cv2.imshow('main', frame)\n if outputPath:\n writer.writeFrame(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n if outputPath:\n writer.close()\n cv2.destroyAllWindows()", "def generateVideo(textFilePath,extractedPath,createdVideoPath):\r\n\t\t#Check if the save directory exists, If not create directory\r\n\t\tif not os.path.exists(createdVideoPath):\r\n\t\t\tos.mkdir(createdVideoPath)\r\n\t\t#Open the text file\r\n\t\tfile = open(textFilePath)\r\n\t\tvideoPath = createdVideoPath\r\n\r\n\t\tfor hashedData in file:\r\n\t\t\thashedData = hashedData.split(\"\\n\")[0]\r\n\t\t\timage_folder = extractedPath + \"/\" + \"data\" + \"/\" + hashedData\r\n\t\t\tvideo_name = hashedData + \".avi\"\r\n\t\t\timages = os.listdir(image_folder)\r\n\t\t\tframe = cv2.imread(os.path.join(image_folder, images[0]))\r\n\t\t\theight, width, layers = frame.shape\r\n\t\t\t#declare the video writter\r\n\t\t\tvideo = cv2.VideoWriter(videoPath + \"/\" +video_name, 0, 1, (width,height))\r\n\t\t\t#Write all images to a single video\r\n\t\t\tfor image in 
images:\r\n\t\t\t\tvideo.write(cv2.imread(os.path.join(image_folder, image)))\r\n\r\n\t\t\tcv2.destroyAllWindows()\r\n\t\t\tvideo.release()", "def create_video():\n print(\"Generating output video\")\n frame_array = []\n files = [f for f in os.listdir(MODIFIED_FRAMES_DIR) if isfile(join(MODIFIED_FRAMES_DIR, f))]\n #for sorting the file names properly\n # files.sort(key = lambda x: x[3:-4])\n files = sorted(files,key=lambda x: int(os.path.splitext(x)[0]))\n for i in range(len(files)):\n filename= MODIFIED_FRAMES_DIR + files[i]\n # print(filename)\n #reading each files\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width,height)\n \n #inserting the frames into an image array\n frame_array.append(img)\n \n out = cv2.VideoWriter(OUTPUT_FILE,cv2.VideoWriter_fourcc(*'DIVX'), FRAME_RATE, size)\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n out.release()\n print(\"Output video generated successfully...\")\n\n # img_array = []\n # for filename in glob.glob(MODIFIED_FRAMES_DIR+'/*.jpg'):\n # img = cv2.imread(filename)\n # height, width, layers = img.shape\n # size = (width,height)\n # img_array.append(img)\n\n # height, width, layers = img_array[0].shape\n # size = (width,height)\n # out = cv2.VideoWriter('output.mov',cv2.VideoWriter_fourcc(*'DIVX'), 15, size) \n # for i in range(len(img_array)):\n # out.write(img_array[i])\n # out.release()" ]
[ "0.6444913", "0.6132553", "0.6021357", "0.6021357", "0.5793323", "0.575927", "0.56994724", "0.5620519", "0.5565024", "0.5488642", "0.54845893", "0.5479945", "0.5418265", "0.5404724", "0.53233945", "0.5318268", "0.5299321", "0.5261676", "0.52610755", "0.51977015", "0.51628315", "0.5160664", "0.5157827", "0.5144048", "0.5141774", "0.51226306", "0.51177037", "0.5109612", "0.5085291", "0.5074146" ]
0.8082349
0
Find the onsets in the array representing the synchronization light. This function assumes the onsets are roughly periodic, with inter-onset intervals between 0.5T and 1.5T (where T is the expected period). The function can also fix missing onsets.
def find_sync_light_onsets(sync_light, invert=True, fixmissing=False):
    # -- Find changes in synch light --
    sync_light_diff = np.diff(sync_light, prepend=0)
    if invert:
        sync_light_diff = -sync_light_diff
    sync_light_diff[sync_light_diff < 0] = 0
    sync_light_threshold = 0.2*sync_light_diff.max()
    sync_light_onset = sync_light_diff > sync_light_threshold
    # -- Find period of sync_light_onset --
    sync_light_onset_ind = np.where(sync_light_onset)[0]
    sync_light_onset_diff = np.diff(sync_light_onset_ind)  # In units of frames
    expected_onset_period = np.median(sync_light_onset_diff)  # In units of (float) frames
    # -- Remove repeated onsets --
    onset_freq_upper_threshold = int(1.5 * expected_onset_period)
    onset_freq_lower_threshold = int(0.5 * expected_onset_period)
    repeated_onsets = sync_light_onset_diff < onset_freq_lower_threshold
    repeated_onsets_ind = np.where(repeated_onsets)[0]
    fixed_sync_light_onset = sync_light_onset.copy()
    fixed_sync_light_onset[sync_light_onset_ind[repeated_onsets_ind+1]] = False
    # -- Fix missing onsets --
    if fixmissing:
        missing_next_onsets = sync_light_onset_diff > onset_freq_upper_threshold
        missing_next_onsets_ind = np.where(missing_next_onsets)[0]
        for indm, missing_onset_ind in enumerate(missing_next_onsets_ind):
            onset_diff = sync_light_onset_diff[missing_onset_ind]
            n_missing = int(np.round(onset_diff / expected_onset_period)) - 1
            #print(n_missing)
            last_onset_ind = sync_light_onset_ind[missing_onset_ind]
            next_onset_ind = sync_light_onset_ind[missing_onset_ind+1]
            period_missing = (next_onset_ind - last_onset_ind)//(n_missing+1)
            new_onset_inds = last_onset_ind + np.arange(1, n_missing+1)*period_missing
            #print([last_onset_ind, next_onset_ind])
            #print(new_onset_inds)
            fixed_sync_light_onset[new_onset_inds] = True
    return fixed_sync_light_onset
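A minimal, hypothetical usage sketch for the function above (an editorial illustration, not part of the dataset record): it builds a synthetic sync-light trace in which the light goes dark for a few frames roughly every 30 frames, deletes one pulse, and checks that fixmissing=True recovers an onset near the gap. The pulse spacing, pulse width, and frame counts are made-up values; only numpy and the function above are assumed.

import numpy as np

n_frames = 600
sync_light = np.full(n_frames, 255.0)              # bright baseline (hypothetical units)
pulse_starts = np.arange(30, n_frames, 30)
pulse_starts = pulse_starts[pulse_starts != 150]   # simulate one missed pulse
for start in pulse_starts:
    sync_light[start:start + 5] = 0.0              # light turns off briefly at each pulse

onsets = find_sync_light_onsets(sync_light, invert=True, fixmissing=True)
print(np.where(onsets)[0])                         # should include a filled-in onset at frame 150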
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_onsets(self):\n get_onsets = ess.OnsetRate()\n # onset_times is np array\n self.onset_times, onset_rate = get_onsets(self.audio)\n # Onset as sample number in the audio signal\n index2delete = []\n previous_time = -9999999\n for index, itime in enumerate(self.onset_times):\n if (itime - previous_time) < 2*self.stroke_length:\n index2delete.append(index)\n else:\n previous_time = itime\n self.onset_times = np.delete(self.onset_times, index2delete)\n self.onset_samples = [int(self.sampling_rate*i) for i in self.onset_times]", "def DetectPulseOnset(self, asig, fs, wMS):\n # the percentage of the maximal value of the slope sum function\n # to detect the onset\n AmplitudeRatio = .01\n\n # low pass filter\n sig = self.zpIIR(asig, 3, .1, 20, 5 * 2/fs)\n wSmp = int(np.round(wMS*fs/1000))\n\n BlankWindowRatio = .9\n\n # delta x\n diffsig = np.diff(sig)\n\n z = np.empty((sig.size - 1 - wSmp, 1))\n z[:] = np.NaN\n\n # calculate slope sum function\n for i in range(wSmp,sig.size-1):\n subsig = diffsig[i-wSmp:i]\n z[i-wSmp] = np.sum(subsig[subsig>0])\n\n z0 = np.mean(z)\n onset = [0]\n tPnt = []\n zThres = 0\n blankWin = int(np.round(400*fs/1000))\n subIdx = np.r_[onset[0]: onset[0] + 4*blankWin + 1]\n MedianArrayWinSize = 5\n\n # this value controls the final acceptance\n PrcofMaxAMP = .2\n SSFAmpArray = np.ones((MedianArrayWinSize,1))*(np.max(z) - np.min(z)) * PrcofMaxAMP\n # the percentage of maximal amplitude for threshold crossing\n DetectionThreshold = .2\n SSFCrossThresholdArray = np.ones((MedianArrayWinSize,1))*z0*DetectionThreshold\n idx = 1\n\n # Keep loop going while onsets detected\n while(1):\n\n # look for the first location where z > z0\n try:\n\n # Look in z[subIdx] (and make sure it doesn't go past z's size)\n # find first index where z > the mean of z\n tempIndex = np.trim_zeros(subIdx*(z.size>subIdx), 'b')\n ix = np.amin(np.where(z[tempIndex] > z0)[0])\n except:\n break\n\n ix = tempIndex[ix]\n tPnt.append(ix)\n srcWin = np.r_[np.maximum(0,ix - wSmp): ix + wSmp]\n #if the window has passed the length of the data, then exit\n if srcWin[-1] >= len(z):\n break\n\n # This section of code is to remove the initial zero-region in the SSF function before looking for onset (if such region exists)\n zPnt = np.where(z[srcWin] == 0)\n\n if zPnt[0].size != 0:\n zPnt = srcWin[zPnt[0]]\n\n if np.any(zPnt < ix):\n srcWin = np.r_[zPnt[np.max(np.where(zPnt < ix))]: ix + wSmp]\n\n # accept the window\n if ( np.max(z[srcWin]) - np.min(z[srcWin]) > zThres):\n\n # calculate the threshold for next cycle\n SSFAmp = (np.max(z[srcWin]) - np.min(z[srcWin])) * PrcofMaxAMP\n SSFAmpArray[np.remainder(idx, MedianArrayWinSize)] = SSFAmp\n zThres = np.median(SSFAmpArray)\n SSFCrossThresholdArray[np.remainder(idx, MedianArrayWinSize)] = np.mean(z[srcWin])*DetectionThreshold\n z0 = np.median(SSFCrossThresholdArray)\n minSSF = np.min(z[srcWin]) + SSFAmp *AmplitudeRatio\n a = srcWin[0] + np.min(np.where(z[srcWin] >= minSSF))\n onset.append(a)\n\n # adaptively determine analysis window for next cycle\n bw = blankWin\n subIdx = np.round(np.r_[a + bw: a + 3*bw])\n idx = idx + 1\n\n else:\n # no beat detected\n subIdx = np.round(subIdx + blankWin)\n\n return onset", "def _get_run_onsets(\n runs, length_fr, pad_fr, running_threshold_cms, offset_fr):\n out = []\n for run in runs:\n t2p = run.trace2p()\n tr = t2p.trace('deconvolved')\n\n # Add all onsets of \"other\" frames\n others = t2p.nocs(length_fr, pad_fr,\n running_threshold_cms)\n for ot in others:\n start = ot + offset_fr\n out.append(tr[:, start:start + 
length_fr])\n\n return out", "def getOnsetTick(s):\n ticksPerQuarter = getResolution(s)\n onsets = [int(n.offset * ticksPerQuarter) for n in s.flat.notes]\n return onsets", "def detect_correction_onset(baseline_llr, \n motiontrajectory, times,\n ptstart, ptend, \n disturbancemode, \n disturbancevalue, \n disturbanceonsettime=None, # if none, reconstruct onset form the threshold ratio\n disturbance_threshold=9.0,\n plotfilename=None):\n\n # Error considering the disturbance\n screentrajectory, err_screen, disturbancetime, disturbanceindex = compute_trial_error(\n motiontrajectory, times,\n ptstart, ptend, \n disturbancemode, \n disturbancevalue, \n disturbanceonsettime,\n disturbance_threshold)\n \n # Error as if there is no disturbance (ballistic)\n err_ballistic = raydistance_error(motiontrajectory, ptstart, ptend)\n is_back = 1 - 1 * is_target_forward(motiontrajectory, ptstart, ptend)\n err_ballistic += is_back # penalty for wrong direction\n \n # Normalized motion phase\n phase = normalized_motion_phase(screentrajectory, ptstart, ptend)\n\n # Correction onset is the point \n # when no-disturbance error exceeds A*sigma and\n # the real error is decreasing\n ionsets = []\n for i in range(disturbanceindex, len(err_screen)-3):\n err_base_mean, err_base_covar = baseline_llr.regress(phase[i])\n err_base_var = np.sqrt(err_base_covar)\n if (np.abs(err_ballistic[i]) > 1 * err_base_var) and \\\n (np.abs(err_screen[i]) > np.abs(err_screen[i+1])) and \\\n (np.abs(err_screen[i+1]) > np.abs(err_screen[i+2])) and \\\n (np.abs(err_screen[i+2]) > np.abs(err_screen[i+3])):\n #(np.abs(err_ballistic[i]) > np.abs(err_screen[i])):\n ionsets.append(i)\n\n # Plot the detected onsets\n make_plots = True\n if make_plots:\n plt.plot(screentrajectory[:, 0], screentrajectory[:, 1])\n plt.plot(motiontrajectory[:, 0], motiontrajectory[:, 1])\n sp = ptstart\n plt.plot(sp[0], sp[1], marker='o', markersize=3,)\n gp = ptend\n plt.plot(gp[0], gp[1], marker='o', markersize=3,)\n \n plt.scatter(screentrajectory[:-1, 0], screentrajectory[:-1, 1], marker='o', s=15, \n linewidths=4, c=err_screen, alpha=0.5, cmap=plt.cm.coolwarm)\n\n onsets = screentrajectory[ionsets]\n plt.scatter(onsets[:, 0], onsets[:, 1], marker='*', s=10, linewidths=4,)\n plt.axis(\"equal\")\n if plotfilename is not None:\n plt.savefig(plotfilename)\n plt.close()\n else:\n plt.show()\n \n\n return onsets", "def badMuons(self, allmuons, allvertices):\n\n muons = list(m for m in allmuons) # make it a python list\n goodMuon = []\n\n if len(allvertices) < 1: raise RuntimeError\n PV = allvertices[0].position()\n \n out = [] \n for mu in muons:\n if (not(mu.isPFMuon()) or mu.innerTrack().isNull()):\n goodMuon.append(-1); # bad but we don't care\n continue;\n if (self.preselection(mu)):\n dxypv = abs(mu.innerTrack().dxy(PV));\n dzpv = abs(mu.innerTrack().dz(PV));\n if (self.tighterId(mu)):\n ipLoose = ((dxypv < 0.5 and dzpv < 2.0) or mu.innerTrack().hitPattern().pixelLayersWithMeasurement() >= 2);\n goodMuon.append(ipLoose or (not(self.selectClones_) and self.tightGlobal(mu)));\n elif (self.safeId(mu)):\n ipTight = (dxypv < 0.2 and dzpv < 0.5);\n goodMuon.append(ipTight);\n else:\n goodMuon.append(0);\n else:\n goodMuon.append(3); # maybe good, maybe bad, but we don't care\n\n n = len(muons)\n for i in range(n):\n if (muons[i].pt() < self.ptCut_ or goodMuon[i] != 0): continue;\n bad = True;\n if (self.selectClones_):\n bad = False; # unless proven otherwise\n n1 = muons[i].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n for j in range(n):\n if (j == i or 
goodMuon[j] <= 0 or not(self.partnerId(muons[j]))): continue\n n2 = muons[j].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n if (deltaR(muons[i],muons[j]) < 0.4 or (n1 > 0 and n2 > 0 and ROOT.muon.sharedSegments(muons[i],muons[j]) >= 0.5*min(n1,n2))):\n bad = True;\n break;\n if (bad):\n out.append(muons[i]);\n return out", "def build_set(ls, dsets):\n\n def noh(ls, dsets):\n \"\"\"\n This function remove hydrogens from the selection\n \"\"\"\n data_set = build_set(ls[1], dsets)\n\n noh_set = set()\n pred = oechem.OEIsHydrogen()\n\n for idx in data_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(idx))\n if not pred(atom):\n noh_set.add(idx)\n\n return noh_set\n\n def residues(ls):\n \"\"\"\n This function select residues based on the residue numbers. An example of\n selection can be:\n mask = 'resid A:16 17 19 B:1'\n \"\"\"\n # List residue atom index to be restrained\n res_atom_set = set()\n\n # Dictionary of lists with the chain residues selected to be restrained\n # e.g. {chainA:[res1, res15], chainB:[res19, res17]}\n chain_dic = {'': []}\n\n # Fill out the chain dictionary\n i = 0\n while i < len(ls):\n if ls[i].isdigit():\n chain_dic[''].append(int(ls[i]))\n i += 1\n else:\n try:\n chain_dic[ls[i]].append(int(ls[i + 2]))\n except:\n chain_dic[ls[i]] = []\n chain_dic[ls[i]].append(int(ls[i + 2]))\n i += 3\n\n # Loop over the molecular system to select the atom indexes to be selected\n hv = oechem.OEHierView(system, oechem.OEAssumption_BondedResidue + oechem.OEAssumption_ResPerceived)\n for chain in hv.GetChains():\n chain_id = chain.GetChainID()\n if chain_id not in chain_dic:\n continue\n for frag in chain.GetFragments():\n for hres in frag.GetResidues():\n res_num = hres.GetOEResidue().GetResidueNumber()\n if res_num not in chain_dic[chain_id]:\n continue\n for oe_at in hres.GetAtoms():\n res_atom_set.add(oe_at.GetIdx())\n\n return res_atom_set\n\n def around(dist, ls):\n \"\"\"\n This function select atom not far than the threshold distance from\n the current selection. 
The threshold distance is in Angstrom\n\n selection can be:\n mask = '5.0 around ligand'\n \"\"\"\n # at = system.GetAtom(oechem.OEHasAtomIdx(idx))\n\n # Atom set selection\n atom_set_around = set()\n\n # Create a OE bit vector mask for each atoms\n bv_around = oechem.OEBitVector(system.GetMaxAtomIdx())\n\n # Set the mask atom\n for at in system.GetAtoms():\n if at.GetIdx() in ls:\n bv_around.SetBitOn(at.GetIdx())\n\n # Predicate\n pred = oechem.OEAtomIdxSelected(bv_around)\n\n # Create the system molecule based on the atom mask\n molecules = oechem.OEMol()\n oechem.OESubsetMol(molecules, system, pred)\n\n # Create the Nearest neighbours\n nn = oechem.OENearestNbrs(system, float(dist))\n\n for nbrs in nn.GetNbrs(molecules):\n for atom in oechem.OEGetResidueAtoms(nbrs.GetBgn()):\n if atom.GetIdx() in ls:\n continue\n atom_set_around.add(atom.GetIdx())\n\n return atom_set_around\n\n # Start Body of the selection function by language\n\n # Terminal Literal return the related set\n if isinstance(ls, str):\n return dsets[ls]\n # Not or Noh\n if len(ls) == 2:\n if ls[0] == 'noh': # Noh case\n return noh(ls, dsets)\n elif ls[0] == 'not': # Not case\n return dsets['system'] - build_set(ls[1], dsets)\n else: # Resid case with one index\n return residues(ls[1])\n\n if len(ls) == 3:\n if ls[1] == 'or': # Or Case (set union)\n return build_set(ls[0], dsets) | build_set(ls[2], dsets)\n elif ls[1] == 'and': # And Case (set intersection)\n return build_set(ls[0], dsets) & build_set(ls[2], dsets)\n elif ls[1] == 'diff': # Diff case (set difference)\n return build_set(ls[0], dsets) - build_set(ls[2], dsets)\n elif ls[1] == 'around': # Around case\n return around(ls[0], build_set(ls[2], dsets))\n else:\n return residues(ls[1:]) # Resid case with one or two indexes\n else:\n if ls[0] == 'resid':\n return residues(ls[1:]) # Resid case with multiple indexes\n else:\n raise ValueError(\"The passed list have too many tokens: {}\".format(ls))", "def get_selected_muons(muons, trigobj, mask_events, mu_pt_cut_leading, mu_pt_cut_subleading, mu_aeta_cut, mu_iso_cut): \n passes_iso = muons.pfRelIso04_all < mu_iso_cut\n passes_id = muons.mediumId == 1\n passes_subleading_pt = muons.pt > mu_pt_cut_subleading\n passes_leading_pt = muons.pt > mu_pt_cut_leading\n passes_aeta = NUMPY_LIB.abs(muons.eta) < mu_aeta_cut\n \n trigobj.masks[\"mu\"] = (trigobj.id == 13)\n \n muons_matched_to_trigobj = NUMPY_LIB.invert(mask_deltar_first(muons, muons.masks[\"all\"], trigobj, trigobj.masks[\"mu\"], 0.1))\n \n #select muons that pass these cuts\n muons_passing_id = passes_iso & passes_id & passes_subleading_pt & muons_matched_to_trigobj\n \n #select events that have muons passing cuts \n events_passes_muid = sum_in_offsets(muons, muons_passing_id, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n events_passes_leading_pt = sum_in_offsets(muons, muons_passing_id & passes_leading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 1\n events_passes_subleading_pt = sum_in_offsets(muons, muons_passing_id & passes_subleading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n\n base_event_sel = mask_events & events_passes_muid & events_passes_leading_pt & events_passes_subleading_pt\n \n muons_passing_os = select_muons_opposite_sign(muons, muons_passing_id & passes_subleading_pt)\n events_passes_os = sum_in_offsets(muons, muons_passing_os, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) == 2\n \n final_event_sel = base_event_sel & events_passes_os\n final_muon_sel = muons_passing_id & passes_subleading_pt & 
muons_passing_os\n \n return {\n \"selected_events\": final_event_sel,\n \"selected_muons\": final_muon_sel,\n }", "def clean_detections(npts, on_off):\n on = on_off[:,0]\n off = on_off[:,1]\n idx_on = [on[0]]\n idx_off = [off[0]]\n lowest_idx = on[0]\n\n for ion, ioff in zip(on, off):\n if ion > lowest_idx + npts:\n idx_on.append(ion)\n idx_off.append(ioff)\n lowest_idx = ion\n\n return np.asarray((idx_on, idx_off)).T", "def onset(sig, stw, ltw, centred=False):\n\n stw = int(round(stw))\n ltw = int(round(ltw))\n\n n_channels, n_samples = sig.shape\n onset = np.copy(sig)\n onset_raw = np.copy(sig)\n for i in range(n_channels):\n if np.sum(sig[i, :]) == 0.0:\n onset[i, :] = 0.0\n onset_raw[i, :] = onset[i, :]\n else:\n if centred is True:\n onset[i, :] = sta_lta_centred(sig[i, :], stw, ltw)\n else:\n onset[i, :] = classic_sta_lta(sig[i, :], stw, ltw)\n onset_raw[i, :] = onset[i, :]\n np.clip(1 + onset[i, :], 0.8, np.inf, onset[i, :])\n np.log(onset[i, :], onset[i, :])\n\n return onset_raw, onset", "def merge_sets(sets):\n idxs_skipped = []\n n = len(sets)\n for i in range(n-1):\n if i not in idxs_skipped:\n set_i = sets[i]\n for j in range(i+1,n):\n set_j = sets[j]\n if set_i.intersection( set_j ) > set([]):\n sets[i].update( set_j )\n idxs_skipped.append( j )\n sets_u = [ sets[k] for k in np.setdiff1d(range(n), idxs_skipped).astype(np.int) ]\n return sets_u", "def onsets_rt(self) -> Optional[annotations.BeatData]:\n return load_onsets(self.onsets_rt_path)", "def find_flats(aperture, side='blue'):\r\n \r\n # find dome flat images\r\n domeflats = iraf.hselect('%s????.fits' % side, '$I', 'TURRET == \"APERTURE\" & APERTURE == \"%s\" & LAMPS == \"0000000\" & AIRMASS < 1.01 & IMGTYPE == \"flat\"' % aperture, Stdout=1)\r\n # find internal flat (incandescent lamp) images\r\n intflats = iraf.hselect('%s????.fits' % side, '$I', 'TURRET == \"LAMPS\" & APERTURE == \"%s\" & LAMPS == \"0000001\" & AIRMASS < 1.01' % aperture, Stdout=1)\r\n # dome flats are prefered over internal flats\r\n flats = []\r\n if (len(intflats) > 0) & (len(domeflats) == 0):\r\n flats = intflats\r\n print \"Using %d internal flats for the %s arcsec slit.\" % (len(intflats), aperture)\r\n if len(domeflats) > 3:\r\n flats = domeflats\r\n print \"Using %d dome flats for the %s arcsec slit.\" % (len(domeflats), aperture)\r\n\r\n return flats", "def getmountoffsets():\n r = _getoffsets(isMountoffset=True)\n return r", "def add_sets(list_of_sets):\n global true_introns\n for item in list_of_sets:\n true_introns.update(item)", "def lights_on(self) -> list:\n return [\n entity for entity in self.all_lights if self.hass.get_state(entity) == \"on\"\n ]", "def pointsets_mod_automorphism(self, pointsets):\n points = set()\n for ps in pointsets:\n points.update(ps)\n points = tuple(points)\n Aut = self.lattice_automorphism_group(points,\n point_labels=tuple(range(len(points))))\n indexsets = set([ frozenset([points.index(p) for p in ps]) for ps in pointsets ])\n orbits = []\n while len(indexsets)>0:\n idx = indexsets.pop()\n orbits.append(frozenset([points[i] for i in idx]))\n for g in Aut:\n g_idx = frozenset([g(i) for i in idx])\n indexsets.difference_update([g_idx])\n return tuple(orbits)", "def test_find_sets(self):\n cards = numpy.array([[1,1,1,2,0],\n [0,1,2,2,2],\n [0,1,2,2,2],\n [0,1,2,2,2]])\n\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)", "def find_all_ngon_sols():\n ngon = [None for _ in range(N)] \n 
ngon_set = set()\n numbers = set(range(1, (2 * N) + 1))\n\n for triplet in permutations(numbers, 3):\n ngon[0] = tuple(triplet)\n total = sum(triplet)\n next_ngon_set = set()\n fill_ngon(ngon, numbers - set(triplet), 1, next_ngon_set, total)\n ngon_set |= next_ngon_set\n\n return ngon_set", "def powerSetNaive(array):\n\n res = [[d] for d in array]\n res.append([])\n array_ = []\n skip = 1\n while skip <=len(array)-1:\n\n for x in range(0,len(array),skip):\n array_.append(array[x])\n for y in range(len(array_[0:x+skip+1])):\n toAppend = array_[y:x+1]\n if toAppend not in res:\n res.append(toAppend)\n toAppend = array_[0:x]\n if toAppend not in res:\n res.append(toAppend)\n array_=[]\n skip = skip + 1\n\n return res", "def simple(onArray, offArray):\n \n Larray = len(onArray)\n Larray2 = len(offArray)\n \n assert Larray == Larray2, \"both arrays should have the same size\"\n \n #onFiltered = numpy.array(onArray)[:,OnOff.misc.constants.dataRange]\n #offFiltered = numpy.array(offArray)[:,OnOff.misc.constants.dataRange]\n \n #return onFiltered,offFiltered,OnOff.misc.constants.dataRange\n drange = OnOffCalc.misc.getDatarange(onArray.shape[1])\n dataMask = numpy.ones(onArray.shape)\n #dataMask[:,OnOffCalc.misc.constants.dataRange] = 0\n dataMask[:,drange] = 0\n \n return dataMask", "def getIMA(s, onsets):\n s = s.stripTies()\n\n # with subprocess.Popen([\"onsets2ima\",\"-onsets\"] + [str(o) for o in onsets], stdout=subprocess.PIPE) as proc:\n # output = proc.stdout.read().decode('ascii')\n\n proc = subprocess.Popen([\"onsets2ima\",\"-onsets\"] + [str(o) for o in onsets], stdout=subprocess.PIPE)\n try:\n outs, errs = proc.communicate(timeout=5)\n except TimeoutExpired:\n proc.kill()\n raise IMATimeoutError\n output = outs.decode('ascii')\n\n ima_str = output.split('\\n')[0].strip()\n ima_spect_str = output.split('\\n')[1].strip()\n\n ima = [float(w) for w in ima_str.split(' ')]\n ima_spect = [float(w) for w in ima_spect_str.split(' ')]\n\n #if onset of first note != 0 (start with rest), add zeros to ima_spect\n ima_spect = [0.0]*onsets[0] + ima_spect\n\n ima_spect = [ima_spect[o] for o in onsets]\n\n return ima, ima_spect", "def mask_the_images(working_path,set_name):\n\n file_list=glob('/media/talhassid/My Passport/haimTal/test_images_0b8afe447b5f1a2c405f41cf2fb1198e.npy')\n out_images = [] #final set of images for all patients\n for fname in file_list:\n out_images_per_patient = []\n print (\"working on file \", fname)\n imgs_to_process = np.load(fname.replace(\"lungmask\",\"images\")) # images of one patient\n masks = np.load(fname)\n for i in range(len(imgs_to_process)):\n mask = masks[i]\n img = imgs_to_process[i]\n new_size = [512,512] # we're scaling back up to the original size of the image\n img= mask*img # apply lung mask\n #\n # renormalizing the masked image (in the mask region)\n #\n new_mean = np.mean(img[mask>0])\n new_std = np.std(img[mask>0])\n #\n # Pulling the background color up to the lower end\n # of the pixel range for the lungs\n #\n old_min = np.min(img) # background color\n img[img==old_min] = new_mean-1.2*new_std # resetting backgound color\n img = img-new_mean\n img = img/new_std\n #make image bounding box (min row, min col, max row, max col)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n #\n # Finding the global min and max row over all regions\n #\n min_row = 512\n max_row = 0\n min_col = 512\n max_col = 0\n for prop in regions:\n B = prop.bbox\n if min_row > B[0]:\n min_row = B[0]\n if min_col > B[1]:\n min_col = B[1]\n if max_row < B[2]:\n 
max_row = B[2]\n if max_col < B[3]:\n max_col = B[3]\n width = max_col-min_col\n height = max_row - min_row\n if width > height:\n max_row=min_row+width\n else:\n max_col = min_col+height\n #\n # cropping the image down to the bounding box for all regions\n # (there's probably an skimage command that can do this in one line)\n #\n img = img[min_row:max_row,min_col:max_col]\n mask = mask[min_row:max_row,min_col:max_col]\n if max_row-min_row <5 or max_col-min_col<5: # skipping all images with no god regions\n pass\n else:\n # moving range to -1 to 1 to accomodate the resize function\n mean = np.mean(img)\n img = img - mean\n min = np.min(img)\n max = np.max(img)\n img = img/(max-min)\n new_img = resize(img,[512,512], mode='constant')\n out_images_per_patient.append(new_img)\n\n id = re.sub(r'.*_images_(.*)\\.npy',r'\\1',fname)\n patient_images_and_id = (out_images_per_patient,id)\n out_images.append(patient_images_and_id)\n print (\"Delete files: {} \\n\\t {} \".format(fname,re.sub(\"lungmask\",\"images\",fname)))\n os.remove(fname)\n os.remove(fname.replace(\"images\",\"lungmask\")) # images of one patient\n\n\n np.save(working_path+\"{}Images.npy\".format(set_name),out_images)", "def nondetects(self, masked=False):\r\n grd = self.grd\r\n xnd = []\r\n ynd = []\r\n ncells = len(grd.cells['depth'])\r\n non_detects_i_tr = np.zeros(ncells, np.int32)\r\n if masked:\r\n not_flagged = np.where(self.rec_track.flagged==0)[0]\r\n rec_track = self.rec_track[not_flagged]\r\n rec_seg = self.make_segments(set_depth=True, \r\n input_rec_track=rec_track)\r\n else:\r\n rec_seg = self.rec_seg\r\n for nr, rseg in enumerate(rec_seg):\r\n seg = rec_seg[nr]\r\n dt = seg.dt\r\n if dt > dt_signal+1:\r\n t1 = seg.t1\r\n t2 = seg.t2\r\n nint = int(np.rint((t2-t1)/dt_signal)) - 1\r\n x1 = seg.x1\r\n x2 = seg.x2\r\n y1 = seg.y1\r\n y2 = seg.y2\r\n dx_nd = (x2 - x1)/float(nint+1)\r\n dy_nd = (y2 - y1)/float(nint+1)\r\n if nint < 120: # 10 minute cutoff for nondetect filling\r\n xint = [x1 + n*dx_nd for n in range(1,nint)]\r\n yint = [y1 + n*dy_nd for n in range(1,nint)]\r\n xnd = xnd + xint\r\n ynd = ynd + yint\r\n\r\n for nd in range(len(xnd)):\r\n xy = [xnd[nd], ynd[nd]]\r\n i = grd.select_cells_nearest(xy)\r\n if (i is not None) and (i >= 0):\r\n non_detects_i_tr[i] += 1\r\n\r\n return non_detects_i_tr", "def _compute_soffsets(self):\n self.soffsets = [ [] for i in self.doffsets ]\n for idx,dofs in enumerate(self.doffsets):\n for o in dofs:\n self.soffsets[(idx + o) % self.p].append(-o)", "def _gaussian_picker(self, onset, phase, start_time, p_arr, s_arr, ptt,\n stt):\n\n # Determine indices of P and S pick times\n pt_idx = int((p_arr - start_time) * self.sampling_rate)\n st_idx = int((s_arr - start_time) * self.sampling_rate)\n\n # Determine P and S pick window upper and lower bounds based on\n # (P-S)/2 -- either this or the next window definition will be\n # used depending on which is wider.\n pmin_idx = int(pt_idx - (st_idx - pt_idx) / 2)\n pmax_idx = int(pt_idx + (st_idx - pt_idx) / 2)\n smin_idx = int(st_idx - (st_idx - pt_idx) / 2)\n smax_idx = int(st_idx + (st_idx - pt_idx) / 2)\n\n # Check if index falls outside length of onset function; if so set\n # window to start/end at start/end of data.\n for idx in [pmin_idx, pmax_idx, smin_idx, smax_idx]:\n if idx < 0:\n idx = 0\n if idx > len(onset):\n idx = len(onset)\n\n # Defining the bounds to search for the event over\n # Determine P and S pick window upper and lower bounds based on\n # set percentage of total travel time, plus marginal window\n\n # window 
based on self.fraction_tt of P/S travel time\n pp_ttime = ptt * self.fraction_tt\n ps_ttime = stt * self.fraction_tt\n\n # Add length of marginal window to this. Convert to index.\n P_idxmin_new = int(pt_idx - int((self.marginal_window + pp_ttime)\n * self.sampling_rate))\n P_idxmax_new = int(pt_idx + int((self.marginal_window + pp_ttime)\n * self.sampling_rate))\n S_idxmin_new = int(st_idx - int((self.marginal_window + ps_ttime)\n * self.sampling_rate))\n S_idxmax_new = int(st_idx + int((self.marginal_window + ps_ttime)\n * self.sampling_rate))\n\n # Setting so the search region can't be bigger than (P-S)/2:\n # compare the two window definitions; if (P-S)/2 window is\n # smaller then use this (to avoid picking the wrong phase).\n P_idxmin = np.max([pmin_idx, P_idxmin_new])\n P_idxmax = np.min([pmax_idx, P_idxmax_new])\n S_idxmin = np.max([smin_idx, S_idxmin_new])\n S_idxmax = np.min([smax_idx, S_idxmax_new])\n\n # Setting parameters depending on the phase\n if phase == \"P\":\n sta_winlen = self.p_onset_win[0]\n win_min = P_idxmin\n win_max = P_idxmax\n if phase == \"S\":\n sta_winlen = self.s_onset_win[0]\n win_min = S_idxmin\n win_max = S_idxmax\n\n # Find index of maximum value of onset function in the appropriate\n # pick window\n max_onset = np.argmax(onset[win_min:win_max]) + win_min\n # Trim the onset function in the pick window\n onset_trim = onset[win_min:win_max]\n\n # Only keep the onset function outside the pick windows to\n # calculate the pick threshold\n onset_threshold = onset.copy()\n onset_threshold[P_idxmin:P_idxmax] = -1\n onset_threshold[S_idxmin:S_idxmax] = -1\n onset_threshold = onset_threshold[onset_threshold > -1]\n\n # Calculate the pick threshold: either user-specified percentile of\n # data outside pick windows, or 88th percentile within the relevant\n # pick window (whichever is bigger).\n threshold = np.percentile(onset_threshold, self.pick_threshold * 100)\n threshold_window = np.percentile(onset_trim, 88)\n threshold = np.max([threshold, threshold_window])\n\n # Remove data within the pick window that is lower than the threshold\n tmp = (onset_trim - threshold).any() > 0\n\n # If there is any data that meets this requirement...\n if onset[max_onset] >= threshold and tmp:\n exceedence = np.where((onset_trim - threshold) > 0)[0]\n exceedence_dist = np.zeros(len(exceedence))\n\n # Really faffy process to identify the period of data which is\n # above the threshold around the highest value of the onset\n # function.\n d = 1\n e = 0\n while e < len(exceedence_dist) - 1:\n if e == len(exceedence_dist):\n exceedence_dist[e] = d\n else:\n if exceedence[e + 1] == exceedence[e] + 1:\n exceedence_dist[e] = d\n else:\n exceedence_dist[e] = d\n d += 1\n e += 1\n\n # Find the indices for this period of data\n tmp = exceedence_dist[np.argmax(onset_trim[exceedence])]\n tmp = np.where(exceedence_dist == tmp)\n\n # Add one data point below the threshold at each end of this period\n gau_idxmin = exceedence[tmp][0] + win_min - 1\n gau_idxmax = exceedence[tmp][-1] + win_min + 2\n\n # Initial guess for gaussian half-width based on onset function\n # STA window length\n data_half_range = int(sta_winlen * self.sampling_rate / 2)\n\n # Select data to fit the gaussian to\n x_data = np.arange(gau_idxmin, gau_idxmax, dtype=float)\n x_data = x_data / self.sampling_rate\n y_data = onset[gau_idxmin:gau_idxmax]\n\n # Convert indices to times\n x_data_dt = np.array([])\n for i in range(len(x_data)):\n x_data_dt = np.hstack([x_data_dt, start_time + x_data[i]])\n\n # Try to fit a 
gaussian.\n try:\n # Initial parameters are:\n # height = max value of onset function\n # mean = time of max value\n # sigma = data half-range (calculated above)\n p0 = [np.max(y_data),\n float(gau_idxmin + np.argmax(y_data))\n / self.sampling_rate,\n data_half_range / self.sampling_rate]\n\n # Do the fit\n popt, _ = curve_fit(util.gaussian_1d, x_data, y_data, p0)\n\n # Results:\n # popt = [height, mean (seconds), sigma (seconds)]\n max_onset = popt[0]\n # Convert mean (pick time) to time\n mean = start_time + float(popt[1])\n sigma = np.absolute(popt[2])\n\n gaussian_fit = {\"popt\": popt,\n \"xdata\": x_data,\n \"xdata_dt\": x_data_dt,\n \"PickValue\": max_onset,\n \"PickThreshold\": threshold}\n\n # If curve_fit fails. Will also spit error message to stdout,\n # though this can be suppressed - see warnings.filterwarnings()\n except (ValueError, RuntimeError):\n gaussian_fit = self.DEFAULT_GAUSSIAN_FIT\n gaussian_fit[\"PickThreshold\"] = threshold\n sigma = -1\n mean = -1\n max_onset = -1\n\n # If onset function does not exceed threshold in pick window\n else:\n gaussian_fit = self.DEFAULT_GAUSSIAN_FIT\n gaussian_fit[\"PickThreshold\"] = threshold\n sigma = -1\n mean = -1\n max_onset = -1\n\n return gaussian_fit, max_onset, sigma, mean", "def _get_ring_nodes(m, namin=3, namax=9, remove_redudant=T):\n # first search for rings\n sets = []\n for i in range(namin, namax+1):\n #if i in [3,4,5]:\n pat_i = '*~1' + '~*'*(i-2) + '~*1'\n #else:\n # pat_i = '*:1' + ':*'*(i-2) + ':*1'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m.GetSubstructMatches(Qi):\n set_i = set(tsi)\n if set_i not in sets:\n sets.append( set(tsi) )\n if remove_redudant:\n # now remove those rings that are union of smaller rings\n n = len(sets)\n sets_remove = []\n ijs = itl.combinations( list(range(n)), 2 )\n sets_u = []\n for i,j in ijs:\n set_ij = sets[i].union( sets[j] )\n if (set_ij in sets) and (set_ij not in sets_remove):\n sets_remove.append( set_ij )\n sets_u = cim.get_compl(sets, sets_remove)\n else:\n sets_u = sets\n return sets_u", "def onsets_rb(self) -> Optional[annotations.BeatData]:\n return load_onsets(self.onsets_rb_path)", "def getSets():", "def emg_onsets(emg_amplitude, threshold=0, threshold2=None):\n # Sanity checks.\n if not isinstance(emg_amplitude, np.ndarray):\n emg_amplitude = np.atleast_1d(emg_amplitude).astype('float64')\n if threshold > np.max(emg_amplitude):\n raise ValueError(\"NeuroKit error: emg_onsets(): threshold\"\n \"specified exceeds the maximum of the signal\"\n \"amplitude.\")\n if threshold2 is not None and threshold2 > np.max(emg_amplitude):\n raise ValueError(\"NeuroKit error: emg_onsets(): threshold2\"\n \"specified exceeds the maximum of the signal\"\n \"amplitude.\")\n\n # Extract indices of data points greater than or equal to threshold.\n indices = np.nonzero(emg_amplitude >= threshold)[0]\n\n # Extract initial and final indexes of each activity burst.\n indices = np.vstack((indices[np.diff(np.hstack((-np.inf, indices))) > 1],\n indices[np.diff(np.hstack((indices, np.inf))) > 1])).T\n indices = indices[indices[:, 1]-indices[:, 0] >= 0, :]\n\n # Threshold2.\n if threshold2 is not None:\n indices2 = np.ones(indices.shape[0], dtype=bool)\n for i in range(indices.shape[0]):\n if np.count_nonzero(emg_amplitude[indices[i, 0]: indices[i, 1]+1] >= threshold2) < 1:\n indices2[i] = False\n indices = indices[indices2, :]\n\n # Prepare output.\n indices = list(np.concatenate(indices))\n info = {\"EMG_Onsets\": indices}\n\n return info" ]
[ "0.6857784", "0.6249284", "0.58837444", "0.5452273", "0.53577805", "0.5303218", "0.52744794", "0.52505463", "0.51989776", "0.51802486", "0.51636773", "0.50905186", "0.507182", "0.5051442", "0.5007559", "0.50056607", "0.49910834", "0.4961425", "0.4949146", "0.493811", "0.49135157", "0.49105212", "0.49056417", "0.48830733", "0.48685992", "0.48603174", "0.4848229", "0.48321086", "0.48042777", "0.47821876" ]
0.7570172
0
Estimate whether the animal was running during each trial. This function first smooths the running trace according to smoothsize (noncausal); it then uses the average of N presamples before the onset to estimate whether running was higher than the threshold.
def estimate_running_each_trial(running_trace, trial_onset, smoothsize=10, presamples=4, threshold=3, showfig=False):
    smoothwin = np.ones(smoothsize)/(smoothsize)
    running_trace_smooth = np.convolve(running_trace, smoothwin, mode='same')
    trial_onset_ind = np.where(trial_onset)[0]
    presamples_inds = np.arange(-presamples, 0) + trial_onset_ind[:, np.newaxis]
    pretrial_avg = running_trace_smooth[presamples_inds].mean(axis=1)
    running_each_trial = pretrial_avg > threshold
    if showfig:
        plt.cla()
        plt.plot(running_trace_smooth, '0.8')
        plt.plot(trial_onset_ind, pretrial_avg, 'xg')
        plt.plot(trial_onset_ind, running_each_trial*running_trace_smooth.max(), 'og')
        plt.axhline(threshold, color='k')
        plt.legend(['running_trace_smooth', 'pretrial_avg', 'running_each_trial'], loc='upper right')
        plt.show()
    return running_each_trial, running_trace_smooth
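A brief, hypothetical usage sketch for the function above (an editorial illustration, not part of the dataset record): it fabricates a running-speed trace with one fast stretch and a boolean trial-onset vector, then flags trials whose smoothed pre-onset speed exceeds the threshold. All values are invented for illustration; only numpy and the function above are assumed (matplotlib is not needed with showfig=False).

import numpy as np

rng = np.random.default_rng(0)
n_samples = 1000
running_trace = np.abs(rng.normal(2.0, 1.0, n_samples))   # slow baseline, hypothetical cm/s
running_trace[400:600] += 10.0                            # animal runs during this stretch

trial_onset = np.zeros(n_samples, dtype=bool)
trial_onset[np.arange(100, n_samples, 100)] = True        # one trial onset every 100 samples

running_each_trial, smoothed = estimate_running_each_trial(
    running_trace, trial_onset, smoothsize=10, presamples=4, threshold=3, showfig=False)
print(running_each_trial)   # True for trials whose smoothed pre-onset speed exceeds the threshold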
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def punches(self):\n #:TODO Need to parameterize n\n # Initialize smoothing function\n # Also because I can't take the second derivitive\n\n n = 3\n assert (len(self.averages)==len(self.timestamps))\n size = len(self.averages)\n slopes = []\n for t in [0,size-n]:\n averages = np.asarray(self.averages[t:size])\n timestamps = np.asarray(self.timestamps[t:size])\n \"\"\"\n slope = np.absolute((np.corrcoef(averages,\n timestamps))*np.std(averages)/np.std(timestamps))\n \"\"\"\n slope = np.absolute(np.polyfit(timestamps, averages, 1)[0])*1000000\n #plt.scatter(timestamps, averages)\n slopes.append(slope)\n # If you were punching you are likely still punching need to set a weighting factor to this somehow\n # print(slopes[1])\n self.smoothing_queue.pop(0)\n if self.SIG_DELTA_AVERAGE < slopes[1]:\n self.smoothing_queue.append(1)\n else:\n self.smoothing_queue.append(0)\n if self.smoothing_queue.count(1) > len(self.smoothing_queue)/2:\n punching = True\n else: punching = False\n # print(self.smoothing_queue)\n\n return punching\n #self.counter +=1\n \"\"\"\n if self.counter==self.timing:\n self.counter == 0\n else:\n \"\"\"", "def is_SNP(count):\n counts = sum(count)\n return counts and float(counts - max(count)) / counts > MAX_NOISE", "def stationarity(self, nfactor=20):\n \n tau = self.sampler.get_autocorr_time(tol=0)\n converged = np.all(tau * nfactor < self.sampler.iteration)\n return converged", "def stop_on_low_ais_ess(trial_id, result):\n return result[\"ais_effective_sample_size\"] < 0.1", "def current_threshold_hit(self):\n\n\t\tnew_current = self.robot.pdp.getCurrent(const.CARGO_PDP_ID)\n\n\t\tself._current_samples.append(new_current)\n\n\t\tif len(self._current_samples) > 10:\n\t\t\tself._current_samples.pop(0)\n\n\t\t# Calculate new running average\n\t\tnew_avg = sum(self._current_samples) / len(self._current_samples)\n\n\t\treturn new_avg > const.CARGO_INTAKE_THRESHOLD", "def is_artificial(self):\n\t\treturn 0", "def epidemic_finish(states, iteration):\n return np.sum(states) == 0 and iteration > 10", "def autorange(self, analyte=None, gwin=11, win=40, smwin=5,\n conf=0.01, on_mult=(1., 1.), off_mult=(1., 1.), d_mult=1.2):\n\n if analyte is None:\n analyte = self.internal_standard\n\n bins = 50 # determine automatically? As a function of bkg rms noise?\n\n v = self.focus[analyte] # get trace data\n vl = np.log10(v[v > 1]) # remove zeros from value\n x = np.linspace(vl.min(), vl.max(), bins) # define bin limits\n\n n, _ = np.histogram(vl, x) # make histogram of sample\n kde = gaussian_kde(vl)\n yd = kde.pdf(x) # calculate gaussian_kde of sample\n\n mins = self.findmins(x, yd) # find minima in kde\n\n vs = fastsmooth(v, gwin)\n bkg = vs < 10**(d_mult * mins[0]) # set background as lowest distribution\n if not bkg[0]:\n bkg[0] = True\n\n # assign rough background and signal regions based on kde minima\n self.bkg = bkg\n self.sig = ~bkg\n\n # remove transitions by fitting a gaussian to the gradients of\n # each transition\n # 1. calculate the absolute gradient of the target trace.\n g = abs(fastgrad(v, gwin))\n # 2. 
determine the approximate index of each transition\n zeros = bool_2_indices(bkg).flatten()\n if zeros[0] == 0:\n zeros = zeros[1:]\n if zeros[-1] == bkg.size:\n zeros = zeros[:-1]\n tran = [] # initialise empty list for transition pairs\n\n for z in zeros: # for each approximate transition\n # isolate the data around the transition\n if z - win > 0:\n xs = self.Time[z - win:z + win]\n ys = g[z - win:z + win]\n # determine type of transition (on/off)\n # checkes whether first - last value in window is\n # positive ('on') or negative ('off')\n tp = np.diff(v[z - win:z + win][[0, -1]]) > 0\n\n else:\n xs = self.Time[:z + win]\n ys = g[:z + win]\n # determine type of transition (on/off)\n tp = np.diff(v[:z + win][[0, -1]]) > 0\n # determine location of maximum gradient\n c = self.Time[z] # xs[ys == np.nanmax(ys)]\n try: # in case some of them don't work...\n # fit a gaussian to the first derivative of each\n # transition. Initial guess parameters are determined\n # by:\n # - A: maximum gradient in data\n # - mu: c\n # - sigma: half the exponential decay coefficient used\n # for despiking OR 1., if there is no exponent.\n try:\n width = 0.5 * abs(self.despike_params['exponent'])\n except:\n width = 1.\n # The 'sigma' parameter of curve_fit:\n # This weights the fit by distance from c - i.e. data closer\n # to c are more important in the fit than data further away\n # from c. This allows the function to fit the correct curve,\n # even if the data window has captured two independent\n # transitions (i.e. end of one ablation and start of next)\n # ablation are < win time steps apart).\n pg, _ = curve_fit(gauss, xs, ys,\n p0=(np.nanmax(ys),\n c,\n width),\n sigma=abs(xs - c) + .1)\n # get the x positions when the fitted gaussian is at 'conf' of\n # maximum\n # determine transition FWHM\n fwhm = 2 * pg[-1] * np.sqrt(2 * np.log(2))\n # apply on_mult or off_mult, as appropriate.\n if tp:\n lim = np.array([-fwhm, fwhm]) * np.array(on_mult) + pg[1]\n else:\n lim = np.array([-fwhm, fwhm]) * np.array(off_mult) + pg[1]\n\n tran.append(lim)\n except:\n warnings.warn((\"\\nSample {:s}: \".format(self.sample) +\n \"Transition identification at \" +\n \"{:.1f} failed.\".format(self.Time[z]) +\n \"\\nPlease check the data plots and make sure \" +\n \"everything is OK.\\n(Run \" +\n \"'trace_plots(ranges=True)'\"),\n UserWarning)\n pass # if it fails for any reason, warn and skip it!\n\n # remove the transition regions from the signal and background ids.\n for t in tran:\n self.bkg[(self.Time > t[0]) & (self.Time < t[1])] = False\n self.sig[(self.Time > t[0]) & (self.Time < t[1])] = False\n\n self.trn = ~self.bkg & ~self.sig\n\n self.mkrngs()\n\n # final check to catch missed transitions\n # calculate average transition width\n tr = self.Time[self.trn ^ np.roll(self.trn, 1)]\n tr = np.reshape(tr, [tr.size // 2, 2])\n self.trnrng = tr\n trw = np.mean(np.diff(tr, axis=1))\n\n corr = False\n for b in self.bkgrng.flat:\n if (self.sigrng - b < 0.3 * trw).any():\n self.bkg[(self.Time >= b - trw / 2) &\n (self.Time <= b + trw / 2)] = False\n self.sig[(self.Time >= b - trw / 2) &\n (self.Time <= b + trw / 2)] = False\n corr = True\n\n if corr:\n self.mkrngs()\n\n # number the signal regions (used for statistics and standard matching)\n n = 1\n for i in range(len(self.sig) - 1):\n if self.sig[i]:\n self.ns[i] = n\n if self.sig[i] and ~self.sig[i + 1]:\n n += 1\n self.n = int(max(self.ns)) # record number of traces\n\n return", "def get_excess_smoothing_status(self) -> bool:\n return self._excess_smoothing_live.get()", 
"def is_sampled(z):\n return True", "def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled", "def choose_to_stop_early(self):\n # return self.cumulated_num_tests > 10 # Limit to make 10 predictions\n # return np.random.rand() < self.early_stop_proba\n batch_size = 30 # See ingestion program: D_train.init(batch_size=30, repeat=True)\n num_examples = self.metadata_.size()\n num_epochs = self.cumulated_num_steps * batch_size / num_examples\n return num_epochs > self.num_epochs_we_want_to_train # Train for certain number of epochs then stop", "def was_pig_caught(prize):\n if prize > 20:\n return True\n return False", "def _compute_is_terminal(self):\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when self.n_horizon datapoints were labelled\n if np.size(self.indeces_known) == self.n_horizon:\n done = True\n return done", "def num_wet(self):\n return np.sum(self.array == 5)", "def DetectPulseOnset(self, asig, fs, wMS):\n # the percentage of the maximal value of the slope sum function\n # to detect the onset\n AmplitudeRatio = .01\n\n # low pass filter\n sig = self.zpIIR(asig, 3, .1, 20, 5 * 2/fs)\n wSmp = int(np.round(wMS*fs/1000))\n\n BlankWindowRatio = .9\n\n # delta x\n diffsig = np.diff(sig)\n\n z = np.empty((sig.size - 1 - wSmp, 1))\n z[:] = np.NaN\n\n # calculate slope sum function\n for i in range(wSmp,sig.size-1):\n subsig = diffsig[i-wSmp:i]\n z[i-wSmp] = np.sum(subsig[subsig>0])\n\n z0 = np.mean(z)\n onset = [0]\n tPnt = []\n zThres = 0\n blankWin = int(np.round(400*fs/1000))\n subIdx = np.r_[onset[0]: onset[0] + 4*blankWin + 1]\n MedianArrayWinSize = 5\n\n # this value controls the final acceptance\n PrcofMaxAMP = .2\n SSFAmpArray = np.ones((MedianArrayWinSize,1))*(np.max(z) - np.min(z)) * PrcofMaxAMP\n # the percentage 
of maximal amplitude for threshold crossing\n DetectionThreshold = .2\n SSFCrossThresholdArray = np.ones((MedianArrayWinSize,1))*z0*DetectionThreshold\n idx = 1\n\n # Keep loop going while onsets detected\n while(1):\n\n # look for the first location where z > z0\n try:\n\n # Look in z[subIdx] (and make sure it doesn't go past z's size)\n # find first index where z > the mean of z\n tempIndex = np.trim_zeros(subIdx*(z.size>subIdx), 'b')\n ix = np.amin(np.where(z[tempIndex] > z0)[0])\n except:\n break\n\n ix = tempIndex[ix]\n tPnt.append(ix)\n srcWin = np.r_[np.maximum(0,ix - wSmp): ix + wSmp]\n #if the window has passed the length of the data, then exit\n if srcWin[-1] >= len(z):\n break\n\n # This section of code is to remove the initial zero-region in the SSF function before looking for onset (if such region exists)\n zPnt = np.where(z[srcWin] == 0)\n\n if zPnt[0].size != 0:\n zPnt = srcWin[zPnt[0]]\n\n if np.any(zPnt < ix):\n srcWin = np.r_[zPnt[np.max(np.where(zPnt < ix))]: ix + wSmp]\n\n # accept the window\n if ( np.max(z[srcWin]) - np.min(z[srcWin]) > zThres):\n\n # calculate the threshold for next cycle\n SSFAmp = (np.max(z[srcWin]) - np.min(z[srcWin])) * PrcofMaxAMP\n SSFAmpArray[np.remainder(idx, MedianArrayWinSize)] = SSFAmp\n zThres = np.median(SSFAmpArray)\n SSFCrossThresholdArray[np.remainder(idx, MedianArrayWinSize)] = np.mean(z[srcWin])*DetectionThreshold\n z0 = np.median(SSFCrossThresholdArray)\n minSSF = np.min(z[srcWin]) + SSFAmp *AmplitudeRatio\n a = srcWin[0] + np.min(np.where(z[srcWin] >= minSSF))\n onset.append(a)\n\n # adaptively determine analysis window for next cycle\n bw = blankWin\n subIdx = np.round(np.r_[a + bw: a + 3*bw])\n idx = idx + 1\n\n else:\n # no beat detected\n subIdx = np.round(subIdx + blankWin)\n\n return onset", "def is_smelling(self,conc_array):\n if conc_array[int(self.x)][int(self.y)]>self.threshold:\n self.smell_timer = self.Timer(self.T,self.lamda)\n #Nav mode three and four need to know whether the moth is smelling\n #at a specific moment, for that reason they use Tfirst.\n self.Tfirst = self.T\n self.odor = True #this datum will be useful in the graphical functions\n return True\n elif self.turned_on:\n self.odor = False\n if self.smell_timer.is_running(self.T):\n return True #note - even though the there is no detection, the navigator stay in nav mode.\n else:\n self.odor = False\n return False", "def _compute_is_terminal(self):\n new_score = self.episode_qualities[-1]\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when a quality reaches a predefined level\n if new_score >= self.target_quality:\n done = True\n return done", "def Continue():\n # adjust this to take as many steps as you need\n return warp.top.it <= 500", "def is_over(self, state: StonehengeState) -> bool:\n total_result = state.hori_result + state.left_result + state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n # all_taken = True\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item =='2':\n p2_taken += 1\n # else:\n # all_taken = False\n # print('p1 taken:' + str(p1_taken))\n # print('p2 taken:' + str(p2_taken))\n # print('p1_taken more than half?')\n # print(float(p1_taken) >= total_line/2)\n # print('p2_taken more than half?')\n # print(float(p2_taken) >= total_line/2)\n return float(p1_taken) >= total_line/2 or float(p2_taken) >= total_line/2", "def win_condition(self):\n return self.wave == 8", "def run_experiment() -> 
List[bool]:\n return [random.random() < 0.5 for _ in range(1000)]", "def precondition(amp):\n n = len(amp)\n mean = np.mean(amp[:n/5])\n return -(amp-mean)", "def test_next_window_time_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n # Value 15 will be filtered as it ranges between lower and upper bound limits\n filtered_value = test_window_scheme.filter(self.middle_value)\n self.assertEquals(filtered_value, self.middle_value)\n # Let next window time elapse\n time.sleep(4)\n filtered_value = test_window_scheme.filter(self.more_than_upper_bound)\n # None is expected as filtered value because at least one sample has been already passed and\n # value ranges outside lower and upper bound limits\n self.assertEquals(filtered_value, None)", "def _step(self, a):\n obs, rew, done, info = super()._step(a)\n # rew = +1 if past int threshold for first time in episode\n # if self.robot.body_xyz[0] > self.threshold:\n # self.threshold += 1\n # rew = 1.0\n # else:\n # rew = 0.0\n # self.steps += 1\n # if self.steps > self.max_episode_steps:\n # done = True\n return obs, rew, done, info", "def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)", "def problem2():\n k = 4\n total_draws = 20\n total_balls = 50\n\n plt.figure()\n for _ in range(50):\n for num_samples in [10000]:\n experiment_results = []\n for samples in range(num_samples):\n N = np.random.randint(1, k, total_balls - 1)\n N = np.append(N, k)\n N = np.array(N).flatten()\n random.shuffle(N)\n draw = N[:total_draws]\n experiment_result = np.any(draw == 4)\n experiment_results.append(experiment_result)\n plt.plot(np.cumsum(experiment_results) / np.arange(1, num_samples + 1))\n old_result = experiment_results[:]\n\n plt.xlabel('Total Draws')\n plt.ylabel('Probability')\n plt.show()", "def get_nsatpix( self, step ):\n \n return np.sum( self.get_image_step( step, divide_by_exptime=False ) >= 1.6e4 )", "def get_sensor_bool_dryspot_runlevel(self, filename, threshold_min_counted_dryspots=5):\n f = h5py.File(filename, \"r\")\n meta_file = h5py.File(str(filename).replace(\"RESULT.erfh5\", \"meta_data.hdf5\"), 'r')\n\n try:\n single_states, set_of_states, useless_states = self.__get_dryspot_data(f, meta_file)\n multi_states = self.__get_pressure_data(f)\n multi_states = multi_states.squeeze()\n\n activated_sensors = np.count_nonzero(multi_states, axis=1)\n percentage_of_all_sensors = activated_sensors / 1140\n len_wanted_seq = 100\n current = 0\n sequence = np.zeros((len_wanted_seq, self.num_sensors))\n frame_labels = []\n\n if self.aux_info:\n original_frame_idxs = np.full(len_wanted_seq, np.nan, np.int16)\n frame_labels_aux = np.full(len_wanted_seq, np.nan, np.int8)\n sample_percentages = np.full(len_wanted_seq, np.nan)\n single_state_indices = np.full(len_wanted_seq, np.nan, np.int16)\n # flowfronts = np.zeros((len_wanted_seq, self.image_size[0], self.image_size[1]))\n # _coords, flat_fillings = self.__get_filling_data(f, single_states)\n\n for i, sample in enumerate(single_states):\n state_num = int(str(sample).replace(\"state\", \"0\"))\n try:\n sample_percentage = percentage_of_all_sensors[state_num - 1]\n if sample_percentage >= current / len_wanted_seq:\n data = multi_states[state_num - 1, :]\n data = np.log(np.add(data, 1)) # TODO make log optional\n if self.sensor_indizes != ((0, 1), (0, 1)):\n rect = data.reshape(38, 30)\n sel = rect[self.sensor_indizes[0][0]::self.sensor_indizes[0][1],\n self.sensor_indizes[1][0]::self.sensor_indizes[1][1]]\n data = 
sel.flatten()\n sequence[current, :] = data\n\n frame_label = 0\n if state_num in set_of_states:\n frame_label = 1\n frame_labels.append(frame_label)\n\n if self.aux_info:\n original_frame_idxs[current] = state_num\n frame_labels_aux[current] = frame_label\n sample_percentages[current] = sample_percentage\n single_state_indices[current] = i\n # flowfronts[current, :, :] = create_np_image(target_shape=self.image_size,\n # norm_coords=_coords, data=flat_fillings[i])\n current += 1\n except IndexError:\n continue\n\n # determine runlevel label using frame labels and threshold\n lens_of_runs_of_dryspots = [sum(1 for _ in group) for key, group in\n groupby(np.array(frame_labels) == 1) if key]\n max_len = 0 if len(lens_of_runs_of_dryspots) == 0 else max(lens_of_runs_of_dryspots)\n label = 0 if max_len < threshold_min_counted_dryspots else 1\n\n f.close()\n meta_file.close()\n\n if self.aux_info:\n # framelabels, original_frame_idx, original_num_frames, flowfronts, filling_percentage\n aux = {\"framelabel\": frame_labels_aux,\n \"original_frame_idx\": original_frame_idxs,\n \"original_num_multi_states\": len(multi_states),\n \"percent_of_sensors_filled\": sample_percentages,\n \"single_state_indices\": single_state_indices,\n }\n return [(sequence, label, aux)]\n\n return [(sequence, label)]\n except KeyError:\n f.close()\n meta_file.close()\n return None", "def autorange(xvar, sig, gwin=7, swin=None, win=30,\n on_mult=(1.5, 1.), off_mult=(1., 1.5),\n nbin=10, transform='log', thresh=None):\n failed = []\n sig = np.asanyarray(sig)\n\n # smooth signal\n if swin is not None:\n sigs = fastsmooth(sig, swin)\n else:\n sigs = sig\n\n # transform signal\n if transform == 'log':\n tsigs = log_nozero(sigs)\n else:\n tsigs = sigs\n\n if thresh is None:\n if tsigs.ndim == 1:\n scale = False\n tsigs = tsigs.reshape(-1, 1)\n else:\n scale = True\n fsig = separate_signal(tsigs, scaleX=scale).astype(bool)\n else:\n if transform == 'log':\n thresh = np.log(thresh)\n fsig = tsigs > thresh\n fsig[0] = False # the first value must always be background\n fbkg = ~fsig\n\n # remove transitions by fitting a gaussian to the gradients of\n # each transition\n\n # 1. determine the approximate index of each transition\n zeros = bool_2_indices(fsig)\n \n if zeros is not None:\n zeros = zeros.flatten()\n if sigs.ndim > 1:\n sigs = sigs.sum(axis=1)\n\n # 2. calculate the absolute gradient of the target trace.\n grad = abs(fastgrad(sigs, gwin)) # gradient of untransformed data.\n\n for z in zeros: # for each approximate transition\n # isolate the data around the transition\n if z - win < 0:\n lo = gwin // 2\n hi = int(z + win)\n elif z + win > (len(sig) - gwin // 2):\n lo = int(z - win)\n hi = len(sig) - gwin // 2\n else:\n lo = int(z - win)\n hi = int(z + win)\n\n xs = xvar[lo:hi]\n ys = grad[lo:hi]\n\n # determine type of transition (on/off)\n mid = (hi + lo) // 2\n tp = sigs[mid + 3] > sigs[mid - 3] # True if 'on' transition.\n\n # fit a gaussian to the first derivative of each\n # transition. Initial guess parameters:\n # - A: maximum gradient in data\n # - mu: c\n # - width: 2 * time step\n # The 'sigma' parameter of curve_fit:\n # This weights the fit by distance from c - i.e. data closer\n # to c are more important in the fit than data further away\n # from c. This allows the function to fit the correct curve,\n # even if the data window has captured two independent\n # transitions (i.e. 
end of one ablation and start of next)\n # ablation are < win time steps apart).\n centre = xvar[z] # center of transition\n width = (xvar[1] - xvar[0]) * 2\n\n try:\n pg, _ = curve_fit(gauss, xs, ys,\n p0=(np.nanmax(ys),\n centre,\n width),\n sigma=(xs - centre)**2 + .01)\n # get the x positions when the fitted gaussian is at 'conf' of\n # maximum\n # determine transition FWHM\n fwhm = abs(2 * pg[-1] * np.sqrt(2 * np.log(2)))\n # apply on_mult or off_mult, as appropriate.\n if tp:\n lim = np.array([-fwhm, fwhm]) * on_mult + pg[1]\n else:\n lim = np.array([-fwhm, fwhm]) * off_mult + pg[1]\n\n fbkg[(xvar > lim[0]) & (xvar < lim[1])] = False\n fsig[(xvar > lim[0]) & (xvar < lim[1])] = False\n\n except RuntimeError:\n failed.append([centre, tp])\n pass\n\n ftrn = ~fbkg & ~fsig\n\n # if there are any failed transitions, exclude the mean transition width\n # either side of the failures\n if len(failed) > 0:\n trns = xvar[bool_2_indices(ftrn)]\n tr_mean = (trns[:, 1] - trns[:, 0]).mean() / 2\n for f, tp in failed:\n if tp:\n ind = (xvar >= f - tr_mean *\n on_mult[0]) & (xvar <= f + tr_mean * on_mult[0])\n else:\n ind = (xvar >= f - tr_mean *\n off_mult[0]) & (xvar <= f + tr_mean * off_mult[0])\n fsig[ind] = False\n fbkg[ind] = False\n ftrn[ind] = False\n\n return fbkg, fsig, ftrn, [f[0] for f in failed]" ]
[ "0.5619866", "0.5553437", "0.5524997", "0.53908414", "0.5296018", "0.5253825", "0.5213897", "0.52011967", "0.51841336", "0.5176683", "0.51282734", "0.5120962", "0.51179755", "0.5112065", "0.50441957", "0.50383514", "0.503653", "0.50297564", "0.4981896", "0.4948819", "0.49382964", "0.49352995", "0.4923914", "0.4913876", "0.488362", "0.486648", "0.4865212", "0.4859857", "0.4845986", "0.4844091" ]
0.7172721
0
Create instance of PyRPS. redis_url: Redis instance address (tuple containing (hostname, port)). namespace: Namespace to separate Pub/Sub instance from another running on the same redis host.
def __init__(self, namespace, redis_url=("localhost", 6379)):
    self.namespace = namespace
    if isinstance(redis_url, tuple):
        self.redis = StrictRedis(host=redis_url[0], port=redis_url[1])
    elif isinstance(redis_url, str):
        self.redis = StrictRedis(host=redis_url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)", "def __init__(self):\n try:\n config = redis_settings[\"REDIS_BACKEND\"]\n self.servers = config[\"servers\"]\n self.port = config[\"port\"]\n self.db = config[\"db\"]\n self.password = config[\"password\"]\n # r = redis.Redis('10.66.136.84', '6379', 0,password=\"xsw2CDE#vfr4\")\n #r = redis.Redis('10.66.136.84', '6379', 0)\n self.redis = Redis(self.servers, self.port, self.db,\n password=self.password, socket_timeout=1)\n except Exception, e:\n print \"Redis YAMLConfig Error :\", e\n logging.error(e)", "def __init__(self, host, port):\n self.r = redis.StrictRedis(host=host, port=port)", "def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r", "def create_connection():\n # REDIS_URL is defined in .env and loaded into the environment by Honcho\n redis_url = os.getenv('REDIS_URL')\n # If it's not defined, use the Redis default\n if not redis_url:\n redis_url = 'redis://localhost:6379'\n urlparse.uses_netloc.append('redis')\n url = urlparse.urlparse(redis_url)\n return redis.StrictRedis(\n host=url.hostname,\n port=url.port,\n db=0,\n password=url.password\n )", "def __init__(self, config):\n self.r = redis.StrictRedis(host=config['REDIS_HOST'],\n port=config['REDIS_PORT'],\n db=config['REDIS_DB'])", "def connect_redis(uri):\n puri = urlparse.urlparse(uri)\n host = puri.hostname\n port = puri.port\n password = puri.password if puri.password else ''\n db_name = puri.path.split('/')[1]\n r = redis.Redis(host=host, port=port, password=password, db=db_name)\n assert r.ping()\n return r", "def __init__(self, settings):\n\n self.r = redis.Redis(\n host=settings['hostname'],\n port=settings['port']\n )\n\n # set the redis list name for storing jobs\n self.joblist = settings['joblistname']", "def connect(self):\n self.connection = redis.Redis(\n host=self.host,\n port=self.port,\n socket_connect_timeout=self.timeout,\n socket_timeout=self.timeout\n )", "def _conn_redis(self) -> Redis:\n return Redis(host=self._REDIS_DB_HOST, port=self._REDIS_DB_PORT, db=0,decode_responses=True)", "def create_redis_connection(app=None):\n\n if app:\n app.logger.info('Instantiated new redis connection.')\n\n redis_connection = redis.StrictRedis(\n host=\"localhost\",\n port=6379,\n db=0\n )\n\n if not redis_connection.exists('last_queue_idx'):\n redis_connection.set('last_queue_idx', 0)\n\n return redis_connection", "def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))", "def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)", "def get_redis_client():\n return redis.from_url(settings.REDIS_URI)", "def _connect(self):\n self.connection = RedisConnection(self.host, self.port, self.dbname)", "def __init__(self, redis_connection=None):\n self._redis_connection = redis_connection or get_websocket_redis_connection()", "def redis_conn_pool(self) -> ConnectionPool:\n if self._redis_conn_pool is None:\n if self._config[\"graph_redis_pool_block\"]:\n pool_class: Callable = BlockingConnectionPool\n else:\n pool_class = ConnectionPool\n\n if self._config[\"graph_redis_pool_gevent_queue\"]:\n redis_conn_pool = pool_class().from_url(\n self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n queue_class=gevent.queue.LifoQueue,\n )\n\n else:\n redis_conn_pool = pool_class().from_url(\n 
self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n )\n\n self._redis_conn_pool = redis_conn_pool\n\n self._logger.debug(\n \"[%s]: Initialized Redis connection pool: %s\",\n self.__name__,\n self._redis_conn_pool,\n )\n\n return self._redis_conn_pool", "def get_redis_server():\n return redis_server", "def get_redis(**kwargs):\n redis_cls = kwargs.pop('redis_cls', DEFAULT_REDIS_CLS)\n url = kwargs.pop('url', None)\n if url:\n return redis_cls.from_url(url, **kwargs)\n else:\n return redis_cls(**kwargs)", "def __init__(self, host, redis_port, ssh_user, use_ssh=True):\n\n if use_ssh:\n forwarder = create_tunnel(host=host, port=redis_port, ssh_user=ssh_user)\n self.connection = redis.StrictRedis(host=forwarder.bind_address, port=forwarder.bind_port, db=0)\n else:\n self.connection = redis.StrictRedis(host=host, port=redis_port, db=0)", "def _connect_to_redis(self):\n for name, config in settings.STREAM_REDIS_CONFIG.items():\n self._redis_client = tornadoredis.Client(host=config['host'],\n port=config['port'],\n password=config['password'],\n connection_pool=pool)\n self._redis_client.connect()", "def __init__(self):\n self._redis = redis.Redis(host=\"localhost\", port=6379)\n self._redis.flushdb()", "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "def get_redis() -> redis.Redis:\n global redis_conn\n if not redis_conn:\n host = app.config.get(\"REDIS_HOST\", \"127.0.0.1\")\n port = app.config.get(\"REDIS_PORT\", \"6379\")\n db = app.config.get(\"REDIS_DB\", \"0\")\n redis_conn = redis.Redis(host=host, port=port, db=db)\n\n return redis_conn", "def get_redis():\n return redis.StrictRedis(host='redis', port=6379)", "def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD):\n self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True)", "def __init__(self):\n self._rcon = None\n self._host = CONFIG.redis.host\n self._port = CONFIG.redis.port\n self._db = CONFIG.redis.database\n self.refresh()", "def _connect(self):\n try: \n self.r = redis.StrictRedis(host=self.host, port=self.port, db=self.db)\n except:\n raise", "def __init__(self, handlers=None, default_host=\"\", transforms=None,\n wsgi=False, **settings):\n tornado.web.Application.__init__(self, handlers, default_host, transforms, wsgi, **settings)\n self._rc = redis.StrictRedis(**(settings.get('redis_config', {}))) # redis client: one per application\n self._rcps = self._rc.pubsub() # redis pubsub obj: one per application\n self._sub_cbs = {} # redis pubsub callbacks: one per subscription\n self._sub_cmd_q = 'q_sub_cmds_' + uuid4().hex # TODO: could make a shorter ID just based on tornado server ID\n self._rcps.subscribe(self._sub_cmd_q)\n listener = threading.Thread(target=self._rc_listen)\n listener.setDaemon(True)\n listener.start()" ]
[ "0.65858567", "0.6510327", "0.6420053", "0.6199663", "0.6159909", "0.61511815", "0.6020774", "0.5985737", "0.593212", "0.5925699", "0.5880181", "0.5879944", "0.5878096", "0.5852507", "0.5827931", "0.5803593", "0.5802949", "0.5757128", "0.56726545", "0.56652087", "0.5645896", "0.56363237", "0.5621862", "0.5621862", "0.5597433", "0.55327207", "0.5463311", "0.5443462", "0.53979987", "0.5382986" ]
0.7303635
0
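A minimal construction sketch for the __init__ above, assuming the class is exported as PyRPS and that StrictRedis comes from redis-py; the import path and variable names are illustrative, not taken from the record.

# Hypothetical usage of the constructor above; the module name is an assumption.
from pyrps import PyRPS

bus_tuple = PyRPS("orders", redis_url=("localhost", 6379))  # (hostname, port) tuple
bus_str = PyRPS("orders", redis_url="redis.internal")       # plain hostname string, default port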
Publish new message into queue. queue: Queue name. message: Message data. ttl: How long the message should stay alive, in seconds.
def publish(self, queue, message, ttl=3600):
    # Get next message ID
    message_id = self.redis.incr(self._ns_nextid())
    # Push message to queue
    self.redis.setex(self._ns_message(queue, message_id), ttl, message)
    # List all consumers of given queue
    consumers = self.redis.smembers(self._ns_subscriptions(queue))
    # Publish the message to all the consumers.
    for consumer in consumers:
        self.redis.rpush(self._ns_queue(queue, consumer), message_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, queue, message):\n\n # Instead of passing a queue to the constructor, the publish checks if\n # the target queue exists. If not, it declares the target queue\n if not self.queue:\n self.channel.queue_declare(queue=queue)\n self.queue = queue\n\n self.channel.basic_publish(\n exchange='', routing_key=queue, body=message)", "def publish(self, queue, message):\n # 1. Setup the channel to use to publish message\n channel_handler = ChannelHandler(self._connection)\n\n # 2. Open the channel before using it\n channel_handler.open_channel()\n\n # 3. Send the message via the channel\n channel_handler.send_message(self._exchange_name, queue, message)\n\n # 4. Close the channel after publishing the message\n channel_handler.close_channel()\n LOGGER.info('Bellow message `%s` is published in `%s`', message, queue)", "def test_queue_publish(self):\n self.queue_publisher._connect()\n with self.assertLogs(level='INFO') as cm:\n result = self.queue_publisher.publish_message(test_data['valid'])\n self.assertEqual(True, result)\n\n self.assertIn('Published message to queue', cm.output[8])", "def push(message: str, date: datetime.datetime):\n msg_id = str(uuid.uuid4())\n pipeline = connection.pipeline()\n pipeline.set(msg_id, message)\n pipeline.zadd(QUEUE_KEY, {\n msg_id: date.timestamp()\n })\n pipeline.execute()\n logger.info(f'Save a new future email: [message: {message}, date: {date}]')", "def publish_message(self):\n\n message_count = 0\n while message_count < self._messages:\n message_count += 1\n message_body = \"task number %i\" %(message_count)\n self._channel.basic_publish(exchange='',\n routing_key=self._queue_name,\n body=message_body,\n properties=pika.BasicProperties(\n delivery_mode=2 # make message persistant\n ))\n print(\"Published message %i\" %(message_count))\n time.sleep(self._message_interval)", "def publish(self, message_body, routing_key, exchange=None):\n\n publish_exchange = exchange or self.producer.exchange\n\n self.producer.publish(\n body=message_body,\n exchange=publish_exchange,\n routing_key=routing_key,\n retry=settings.PUBLISH_RETRY,\n retry_policy={\n # First retry immediately,\n 'interval_start': settings.PUBLISH_RETRY_INTERVAL_START,\n # then increase by 2s for every retry.\n 'interval_step': settings.PUBLISH_RETRY_INTERVAL_STEP,\n # but don't exceed 30s between retries.\n 'interval_max': settings.PUBLISH_RETRY_INTERVAL_MAX,\n # give up after 30 tries.\n 'max_retries': settings.PUBLISH_RETRY_MAX_RETRIES,\n # callback for logging\n 'errback': self.on_publish_error,\n 'on_revive': self.on_connection_revival\n },\n # declare exchange and queue and bind them\n declare=list(self.queues.values())) # queues is a dict.\n log.info(f'Published '\n f'message: {self.producer.exchange.name}::{routing_key}')\n log.debug(f'Published '\n f'message_body: {message_body}')", "def new_task(data):\n rabbit_host = os.getenv('RABBIT_HOST', 'localhost')\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(rabbit_host)\n )\n channel = connection.channel()\n channel.basic_publish(\n exchange='',\n routing_key='task_queue',\n body=json.dumps(data),\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n )\n )\n connection.close()", "def _publish(self, topic_name, message):\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)", "def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( 
topic, data, qos = qos, retain = retain )", "def publish(self, message):\n logger.info(\"Publishing to topic [{0}]: {1}\".format(self._topic_name, message))\n self._executor.send(json.dumps({\n 'op': 'publish',\n 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),\n 'topic': self._topic_name,\n 'msg': message\n }))", "def test_publish1(self):\n publish = self.queue.publish(TEST_QUEUE, 'this is a test msg')\n assert publish", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def publish(self, message: str) -> None:", "def test_message_queue_preserves_time_data(self):\n today = date.today()\n now = datetime.now()\n body = {'event_name': 'job.created', 'date': today, 'timestamp': now}\n unbound_message = SQSMessage(self.schema, body=body)\n\n queue_message = self.create_message(json.dumps(unbound_message.body))\n\n message = SQSMessage(self.schema, message=queue_message)\n\n assert isinstance(message, SQSMessage)\n assert message.body['event_name'] == 'job.created'\n assert isinstance(message.body['date'], date)\n assert isinstance(message.body['timestamp'], datetime)\n assert message.body['date'] == today\n assert message.body['timestamp'] == now", "def put_message(cls, message):\n rp = cls.get()\n rp.queue_receive.put(message)", "def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)", "def message(cls, user, message, context):\r\n q.enqueue(new_message_worker, args=(user, message, context), result_ttl=0)\r\n pass", "def enqueue_message(self, item: MessageQueueItem):\n heapq.heappush(self._message_queue, item)", "def publish(self, data, isAsync = True):\n time = now()\n dataWithId = (self.idGenerator.generateId(), data)\n self.messageQueue.setdefault(time, []).append(dataWithId)\n self.notify(time, dataWithId, isAsync)", "def check_and_send_message_to_queue(queue_url, str_message):\n msg_str, msg_sent_timestamp, receipt_handle = lib.get_from_sqs_queue(queue_url, 20, 5)\n\n if not msg_str:\n logger.warning('Unable to retrieve message during this cycle.')\n return \n msg_data = json.loads(msg_str)\n \n msg_ts = float(msg_sent_timestamp) * 0.001\n logger.info('Message from queue: {}'.format(msg_data))\n current_time = time.time()\n\n logger.info('msg ts: {} current ts: {}'.format(msg_ts, current_time))\n\n if (current_time - msg_ts) > 259200:\n logger.info('Message in queue needs to be updated')\n lib.send_message_to_queue(queue_url, str_message)\n lib.delete_message_from_queue(queue_url, receipt_handle) \n else:\n logger.info('Message in queue is still current.')", "def send_message(self, message):\n self.client.queue.put(message)", "def publish(self, name, data, timeout=None):\n\n message = Message(name, data)\n\n if self.encrypted:\n message.encrypt(self.__cipher)\n\n if self.ably.options.use_text_protocol:\n request_body = message.as_json()\n else:\n request_body = message.as_thrift()\n\n path = '/channels/%s/publish' % self.__name\n headers = HttpUtils.default_post_headers(not self.ably.options.use_text_protocol)\n return self.ably.http.post(\n path,\n headers=headers,\n body=request_body,\n timeout=timeout\n ).json()", "async def 
publish_message(self, body: str, priority: int = None):\n message = Message(body=body.encode('utf-8'), priority=priority, delivery_mode=DeliveryMode.PERSISTENT)\n await self._channel.default_exchange.publish(message, routing_key=self._queue)", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def create_queue(self, queue_name='', exclusive=True, queue_size=10,\n message_ttl=60000, overflow_behaviour='drop-head',\n expires=600000):\n args = {\n 'x-max-length': queue_size,\n 'x-overflow': overflow_behaviour,\n 'x-message-ttl': message_ttl,\n 'x-expires': expires\n }\n\n result = self._channel.queue_declare(\n exclusive=exclusive,\n queue=queue_name,\n durable=False,\n auto_delete=True,\n arguments=args)\n queue_name = result.method.queue\n self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(\n queue_name, queue_size, message_ttl))\n return queue_name", "def publish(self, message: str, message_id: int) -> None:\n payload: str = self._create_payload(message, message_id)\n max_payload_bytes = 268435455\n if size(payload) > max_payload_bytes:\n msg = Message.status_message('Message too large.')\n self.client.queue.put(msg)\n return\n return_value: mqtt.MQTTMessageInfo = self.client.publish(self.client.topic, payload, qos=2)\n if return_value.rc == 0: # Publication successful\n return\n else:\n raise SubscriptionError(f'MQTTMessageInfo error code: {return_value.rc}')", "def publish_message(message: str, broker_ip: str, exchange_name: str, exchange_type: str):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=broker_ip))\n channel = connection.channel()\n channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type, durable=True)\n channel.basic_publish(exchange=exchange_name, routing_key='', body=message)\n print(f'Published {message} to the exchange')\n connection.close()", "def message(cls, user, message, context):\n q.enqueue(foo, args=(user, message, context), result_ttl=0)\n pass", "def _put_new_message_in_queue(self, message):\n message_type = message.TYPE_STRING\n self.messages[message_type].put(message)" ]
[ "0.66769063", "0.6397617", "0.627912", "0.61567104", "0.60542965", "0.5991828", "0.5984815", "0.59224325", "0.59119457", "0.5885555", "0.58437407", "0.5831145", "0.5831145", "0.57749146", "0.5731569", "0.572015", "0.57037824", "0.5698528", "0.5670229", "0.56518567", "0.56388974", "0.56305766", "0.56260747", "0.5615777", "0.5605588", "0.5598982", "0.55891937", "0.55742264", "0.55735886", "0.55686784" ]
0.81558275
0
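A short sketch of what a publish call does at the Redis level, following the method body above; the instance, queue name, and payload are illustrative.

# Sketch: the payload is stored once with a TTL, and only the message id is
# fanned out to each registered consumer's list.
bus = PyRPS("orders", redis_url=("localhost", 6379))
bus.publish("invoices", "invoice #42 created", ttl=600)
# Per the method above this issues, in order:
#   INCR     on the namespace-wide message-id counter
#   SETEX    on the per-message key, with the 600-second TTL
#   SMEMBERS on the queue's consumers set
#   RPUSH    of the message id onto each consumer's message list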
Return key for subscribers list for given queue.
def _ns_subscriptions(self, queue): return self._ns(queue, "consumers")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_for_name(name):\n return 'hotqueue:%s' % name", "def QueueId(self):\n\t\treturn self._get_attribute('queueId')", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def service_bus_queue_endpoint_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def get_rabbit_queue():\n\n return \"metrics_queue\"", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def list_queues():\n result = set()\n for s in list(systems.values()):\n for q in list(s[\"queue\"].keys()):\n result.add(q)\n\n return result", "def queue_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"queue_name\")", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def queue_name(is_parallel):\n return QUEUE_NAMES[int(bool(is_parallel))]", "def queue_job_ids(self):\n return list(self.queue.keys())", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_controller", "def queue_path(self, project, location, queue):\n # This is value is not actually used, but it might be good for debugging.\n return \"projects/{project}/locations/{location}/queues/{queue}\".format(\n project=project, location=location, queue=queue)", "def key( self, mess, args):\n user = mess.getFrom()\n if user in self.users:\n return 'You are already subscribed.'\n else:\n self.users[user] = args\n self.log( '%s subscribed to the broadcast.' 
% user)\n return 'You are now subscribed.'", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def sqs_lookup_url(session, queue_name):\n client = session.client('sqs')\n resp = client.get_queue_url(QueueName=queue_name)\n return resp['QueueUrl']", "def _GetParentKeyFromTag(cls, tag):\n return ndb.Key('FrontendJobList', tag)", "def key(self):\n\n for member in self.members:\n if member.key:\n return member.name" ]
[ "0.6073377", "0.5581305", "0.55699074", "0.5561635", "0.55190897", "0.5469011", "0.540408", "0.5387118", "0.5387118", "0.5283115", "0.5264972", "0.5261164", "0.52112347", "0.51992536", "0.5160349", "0.5156108", "0.51445335", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5114686", "0.5094159", "0.5080408", "0.5075173", "0.50626355" ]
0.5925365
1
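_ns_subscriptions above delegates to an _ns helper that is not part of this record; a plausible sketch is shown below, with the ':' separator being an assumption.

# Hypothetical _ns helper (not shown in the record): prefixes key parts with the
# instance namespace so several PyRPS instances can share one Redis host.
def _ns(self, *parts):
    return ":".join([self.namespace] + [str(p) for p in parts])

# With namespace "orders", _ns_subscriptions("invoices") would then return
# "orders:invoices:consumers".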
Unsubscribe from message queue and destroy it. Do not call if you want persistent queues or if you access one queue from multiple processes.
def unsubscribe(self):
    # Unsubscribe
    self.pyrps.redis.srem(self.pyrps._ns_subscriptions(self.queue), self.consumer_id)
    # Remove message queue
    self.pyrps.redis.delete(self.pyrps._ns_queue(self.queue, self.consumer_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy_queue(self):\n response = self.queue.delete()\n if self._is_error_call(response):\n raise RuntimeError('SQS could not delete queue: %s' % response)\n self.queue, self.queue_name = None, None", "def __clear_message_queue(self):\r\n self.__lib.CC_ClearMessageQueue(self.__serno)", "def unsubscribe(self):\r\n self._unregister()", "def remove_queue(self, queue) -> None:\r\n self.receive_queues.remove(queue)", "async def unsubscribe(self):\n LOGGER.info('Subscription removed')\n await self._ros.send(self._unsubscribe_msg)", "def delete_queue(self, queue_name):\n amqp_session = self.__broker.getAmqpSession()\n amqp_session.queue_delete(queue_name)", "def __del__(self):\n self.unsubscribe()", "def del_queue(self, queue_id):\n del self.queue_dict[queue_id]", "def drop_message(self):\n heapq.heappop(self._message_queue)", "def remove_queue(self, queue):\n with self.mutex:\n self.queues.remove(queue)", "def delete_queue(self):\n self.work_queue_client.delete_queue()", "def unsubscribe(self):\n pass # pragma: no cover", "def unlisten(self, prefix: str) -> None:\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info(\"No longer polling for message type: %s\", prefix)", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def _cleanup_method(self, queue_name, ep=None):\n if ep._chan is not None and not ep._chan._queue_auto_delete:\n # only need to delete if AMQP didn't handle it for us already!\n # @TODO this will not work with XOs (future)\n try:\n ch = self.container.node.channel(RecvChannel)\n ch._recv_name = NameTrio(get_sys_name(), \"%s.%s\" % (get_sys_name(), queue_name))\n ch._destroy_queue()\n except TransportError as ex:\n log.warn(\"Cleanup method triggered an error, ignoring: %s\", ex)", "def _queue_delete(self, queue):\n\n queue.delete()", "def message_delete(self):\r\n SlTrace.lg(\"Destroying timed message\", \"message\")\r\n if self.cur_message is not None:\r\n SlTrace.lg(\"Found message to destroy\", \"message\")\r\n self.cur_message.destroy()\r\n self.cur_message = None", "def purge_queue(client, queue):\n channel = client.channel()\n\n channel.queue_declare(queue=queue, durable=True, auto_delete=False)\n channel.queue_purge(queue)\n channel.close()", "def on_close(self):\n self.subscrib.unsubscribe(self.channel)\n self.thread.stop()", "def purge_mailbox(self):\n self._mailbox.clear()", "def unsubscribe(self):\n if self._subscribed and self._connected:\n try:\n msg = self._create_message(strings.UNSUB_MSG)\n self.write(msg)\n except (OSError, KeyError) as ex:\n _LOGGER.error(\n \"PyISY encountered a socket error while writing unsubscribe message to the socket: %s.\",\n ex,\n )\n self._subscribed = False\n self.disconnect()", "def stop_messenger(self):\n if self.connected:\n self.messenger.stop()\n self.connected = False", "def stop(self):\n self.running = False\n with self.lock:\n self.websockets.clear()\n self.poller.release()", "def purge(self):\n self._rpc(specification.Queue.Purge())", "def unregister(self):\n self._executor.unregister_publisher(self)", "def delete_queue(client, vhost, queue):\n client.delete_queue(vhost, queue)" ]
[ "0.7262633", "0.6581309", "0.6575992", "0.6442243", "0.6403433", "0.63751954", "0.63708746", "0.63472867", "0.6332669", "0.6327779", "0.6296495", "0.6264714", "0.6237197", "0.61690885", "0.61690885", "0.61690885", "0.61690885", "0.61690885", "0.61083573", "0.6106147", "0.6098599", "0.6095181", "0.6090346", "0.6051243", "0.6048008", "0.5994001", "0.5989376", "0.59489375", "0.5945131", "0.5913578" ]
0.8235168
0
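A consumer-lifecycle sketch implied by the unsubscribe above, which references self.pyrps, self.queue and self.consumer_id; the subscribe() and get() counterparts shown here are assumptions, not part of the record.

# Illustrative lifecycle; only unsubscribe() is documented in the record above.
consumer = bus.subscribe("invoices")   # assumed: registers consumer_id in the consumers set
try:
    msg = consumer.get(timeout=5)      # assumed: pops a message id and loads the payload
finally:
    consumer.unsubscribe()             # SREM from the consumers set, DEL of the message list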
Instead of rendering each wall block separately, we create a single shape that can be drawn with one call, rather than issuing a draw call per wall block.
def create_wall_shape(self):
    self.shape_walls = arcade.ShapeElementList()
    self.shape_walls.center_x = 0
    self.shape_walls.center_y = 0
    self.shape_walls.angle = 0

    point_list = []
    color_list = []

    # create the walls into a single shape
    walls = self.game.walls
    for wall in walls:
        points = self.get_entity_dimensions(wall)
        point_list.append(points[0])
        point_list.append(points[1])
        point_list.append(points[2])
        point_list.append(points[3])

        # as we have 4 points
        for i in range(4):
            color_list.append(COLOUR_MAP[wall.base_colour])

    self.shape_walls.append(
        arcade.create_rectangles_filled_with_colors(point_list, color_list)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_walls(self):\n for x in range(self.width):\n self.add_thing(Wall(), (x, 0))\n self.add_thing(Wall(), (x, self.height - 1))\n\n for y in range(self.height):\n self.add_thing(Wall(), (0, y))\n self.add_thing(Wall(), (self.width - 1, y))", "def _draw_walls(self, draw_grid):\n for yi, y in enumerate(self._grid):\n for xi, x in enumerate(y):\n for i, w in enumerate(x.walls):\n if i == 0 and w:\n draw_grid[yi * 2 + 1][xi * 2] = self._wall_color\n if i == 1 and w:\n draw_grid[yi * 2 + 1][xi * 2 + 2] = self._wall_color\n if i == 2 and w:\n draw_grid[yi * 2][xi * 2 + 1] = self._wall_color\n if i == 3 and w:\n draw_grid[yi * 2 + 2][xi * 2 + 1] = self._wall_color\n return draw_grid", "def add_walls(self):\n for x in range(self.width + 1):\n if not self.some_things_at((x, 0), Wall):\n self.add_thing(Wall(), (x, 0))\n if not self.some_things_at((x, self.height), Wall):\n self.add_thing(Wall(), (x, self.height))\n\n for y in range(self.height + 1):\n if not self.some_things_at((0, y), Wall):\n self.add_thing(Wall(), (0, y))\n if not self.some_things_at((self.width, y), Wall):\n self.add_thing(Wall(), (self.width, y))\n #self.add_thing(Wumpus(),(1,3))\n #self.add_thing(Pit(),(3,3))\n #self.add_thing(Pit(),(3,1))\n #self.add_thing(Gold(),(2,3))\n #self.add_thing(Pit(),(4,4))", "def build_blocks():\n block_1 = GRect(375, 80, x=20, y=330)\n block_1.filled = True\n block_1.color = 'firebrick'\n block_1.fill_color = 'firebrick'\n window.add(block_1)\n block_2 = GRect(375, 80, x=405, y=330)\n block_2.filled = True\n block_2.color = 'steelblue'\n block_2.fill_color = 'steelblue'\n window.add(block_2)\n block_3 = GRect(375, 80, x=20, y=420)\n block_3.filled = True\n block_3.color = 'goldenrod'\n block_3.fill_color = 'goldenrod'\n window.add(block_3)\n block_4 = GRect(375, 80, x=405, y=420)\n block_4.filled = True\n block_4.color = 'forestgreen'\n block_4.fill_color = 'forestgreen'\n window.add(block_4)\n block_5 = GRect(60, 40, x=720, y=120)\n block_5.filled = True\n block_5.color = 'dodgerblue'\n block_5.fill_color = 'dodgerblue'\n window.add(block_5)\n circle_1 = GOval(90, 90, x=20, y=170)\n circle_1.filled = True\n circle_1.color = 'blueviolet'\n circle_1.fill_color = 'blueviolet'\n window.add(circle_1)", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def build_wall(self, type, pos1, pos2, thickness=1):\n raise NotImplementedError", "def make_boundary_wall(self, height, width) -> None:\n for x in range(0, width):\n Wall(self, x, 0)\n Wall(self, x, height - 1)\n for y in range(1, height - 1):\n Wall(self, 0, y)\n Wall(self, width - 1, y)", "def corridor(x,z, emap, width=10, length=10, height=10, details=None, walls=\"ns\", name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n solid_objects = []\r\n\r\n if \"n\" in walls:\r\n # TODO: abstract out the mostly-duplicate code in these cases...\r\n nwall = SolidObject(name+str(wallnum),\r\n Size(length, height, 1),\r\n Position(x, emap.calcHeight(x, z) + height / 2, n-0.5), 0)\r\n solid_objects.append(nwall)\r\n nwallmodel = createMyCuboid(nwall.w() * 2, nwall.h() * 2, nwall.d() * 2,\r\n name=name+str(wallnum),\r\n x=nwall.x(),y=nwall.y(),z=nwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n 
mergeshape.add(nwallmodel)\r\n else:\r\n nwall.setmodel(nwallmodel, details)\r\n\r\n\r\n wallnum += 1\r\n\r\n if \"s\" in walls:\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, emap.calcHeight(x, z)+height / 2, s+0.5), 0)\r\n solid_objects.append(swall)\r\n swallmodel = createMyCuboid(swall.w()*2, swall.h()*2, swall.d()*2,\r\n name=name+str(wallnum),\r\n x=swall.x(), y=swall.y(), z=swall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0,cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(swallmodel)\r\n else:\r\n swall.setmodel(swallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"e\" in walls:\r\n ewall = SolidObject(name+str(wallnum), Size(1, height, width), Position(e-0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(ewall)\r\n ewallmodel = createMyCuboid(ewall.w()*2, ewall.h()*2, ewall.d()*2,\r\n name=name+str(wallnum),\r\n x=ewall.x(), y=ewall.y(), z=ewall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ewallmodel)\r\n else:\r\n ewall.setmodel(ewallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"w\" in walls:\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w+0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(wwall)\r\n wwallmodel = createMyCuboid(wwall.w()*2, wwall.h()*2, wwall.d()*2,\r\n name=name+str(wallnum),\r\n x=wwall.x(), y=wwall.y(), z=wwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(wwallmodel)\r\n else:\r\n wwall.setmodel(wwallmodel, details)\r\n wallnum += 1\r\n\r\n if \"o\" not in walls:\r\n ceiling = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, emap.calcHeight(x, z)+height+0.5, z), 0)\r\n solid_objects.append(ceiling)\r\n ceilingmodel = createMyCuboid(ceiling.w()*2, ceiling.h()*2, ceiling.d()*2,\r\n name=name+str(wallnum),\r\n x=ceiling.x(), y=ceiling.y(), z=ceiling.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ceilingmodel)\r\n else:\r\n ceiling.setmodel(ceilingmodel, details)\r\n\r\n wallnum += 1\r\n\r\n return solid_objects", "def draw_long_shape():\n turtle.fillcolor('blue')\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.back(150)", "def __init__(self, mapfile, xpos, zpos, emap, width=10.0, depth=10.0, height=10.0, name=\"building\", draw_details=None, yoff=0.0, scheme=None):\r\n self.xpos = xpos\r\n self.zpos = zpos\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.name = name\r\n self.ceilingthickness = 1.0\r\n self.walls = []\r\n\r\n if scheme == None:\r\n self.scheme = Building.baseScheme\r\n else:\r\n self.scheme = scheme\r\n\r\n # We don't have to be rigorous here, this should only be a draw_details or an iterable of draw_details.\r\n if hasattr(draw_details, \"__getitem__\") or hasattr(draw_details, \"__iter__\"):\r\n assert (len(draw_details) == self.scheme[\"#models\"])\r\n self.details = draw_details\r\n else:\r\n self.details = [draw_details for x in range(self.scheme[\"#models\"])]\r\n # having a method like this allows draw details to be set later\r\n\r\n self.yoff = yoff\r\n\r\n self.model = [MergeShape(name=name+\".\"+str(x)) for x in range(self.scheme[\"#models\"])]\r\n\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n print(\"Loading building map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n ix,iy = im.size\r\n\r\n 
print(\"image size\", ix, \",\", iy)\r\n\r\n startx = xpos - ix / 2 * width\r\n starty = zpos - ix / 2 * depth\r\n\r\n yoff += emap.calcHeight(-xpos,-zpos)\r\n\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n pixels = im.load()\r\n\r\n for y in range(1,iy-1):\r\n print(\".\", end='')\r\n for x in range(1,ix-1):\r\n colour = pixels[x,y]\r\n\r\n if x == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y], \"edge\"), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y]), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if x == ix-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y], \"edge\"), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y]), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1], \"edge\"), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1]), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == iy-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x, y+1], \"edge\"), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y+1]), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self._executeScheme(x, y, startx, starty, (colour, None), wallfunc=None, ceilingedgefunc=None, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self.set_draw_details(self.details) # after models created otherwise\r\n # details lost by merging\r", "def create_outer_walls(space,width,height):\n static_lines = [pymunk.Segment(space.static_body, (0.0, 0.0), (width, 0.0), 0.0),\n pymunk.Segment(space.static_body, (width, 0.0), (width, height), 0.0),\n pymunk.Segment(space.static_body, (width, height), (0.0, height), 0.0),\n pymunk.Segment(space.static_body, (0.0, 600.0), (0.0, 0.0), 0.0)]\n for line in static_lines:\n line.friction = 0.5\n line.elasticity = 0.9\n\n return static_lines", "def create(self):\n\t\t# Pick a random starting position not on the parameter\n\t\tx = random.randint(1, self.width - 2)\n\t\ty = random.randint(1, self.height - 2)\n\n\t\t# Set node as floor and adjacent nodes as walls\n\t\tself.setFloor(x, y)\n\t\tself.setWall(x - 1, y)\n\t\tself.setWall(x + 1, y)\n\t\tself.setWall(x, y - 1)\n\t\tself.setWall(x, y + 1)\n\n\t\t# Create list of wall positions\n\t\tself._walls = []\n\t\tself._walls.append((x - 1, y))\n\t\tself._walls.append((x + 1, y))\n\t\tself._walls.append((x, y - 1))\n\t\tself._walls.append((x, y + 1))\n\t\t\n\t\twhile self._walls:\n\t\t\t# Pick random wall position\n\t\t\tx, y = random.choice(self._walls)\n\n\t\t\t# Check if this node divides an empty node and a floor node\n\t\t\tif (x > 0 and x < self.width - 1) and (y > 0 and y < self.height - 
1):\n\t\t\t\tif ((self._isEmpty(x - 1, y) and self.isFloor(x + 1, y))\n\t\t\t\tor (self._isEmpty(x + 1, y) and self.isFloor(x - 1, y))\n\t\t\t\tor (self._isEmpty(x, y - 1) and self.isFloor(x, y + 1))\n\t\t\t\tor (self._isEmpty(x, y + 1) and self.isFloor(x, y - 1))):\n\t\t\t\t\t# Check there are less than 2 adjacent floor nodes\n\t\t\t\t\tif self.countAdjacentFloorNodes(x, y) < 2:\n\t\t\t\t\t\t# Set current node as a floor\n\t\t\t\t\t\tself.setFloor(x, y)\n\n\t\t\t\t\t\t# Set adjacent empty tiles to walls and add to list of wall positions\n\t\t\t\t\t\tif x > 0:\n\t\t\t\t\t\t\tself._makeWall(x - 1, y)\n\t\t\t\t\t\tif x < self.width - 1:\n\t\t\t\t\t\t\tself._makeWall(x + 1, y)\n\t\t\t\t\t\tif y > 0:\n\t\t\t\t\t\t\tself._makeWall(x, y - 1)\n\t\t\t\t\t\tif y < self.height - 1:\n\t\t\t\t\t\t\tself._makeWall(x, y + 1)\n\n\t\t\t# Remove the current position from the list of wall positions\n\t\t\tfor wall in self._walls:\n\t\t\t\tif (wall[0] == x and wall[1] == y):\n\t\t\t\t\tself._walls.remove(wall)\n\t\t\n\t\t# Fill in any empty nodes as walls\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\tif self._isEmpty(x, y):\n\t\t\t\t\tself.setWall(x, y)", "def draw(self):\n if self.master != None :\n fill = Cell.FILLED_COLOR_BG\n outline = Cell.FILLED_COLOR_BORDER\n\n if not self.fill:\n fill = Cell.EMPTY_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n walls[self.ord][self.abs] = 0\n else:\n walls[self.ord][self.abs] = 1\n\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def build_wall(self): #py:UR.build_wall\n RUR._UR.build_wall_(self.body)", "def draw_block():\n turtle.down()\n turtle.begin_fill()\n turtle.pensize(3)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.end_fill()\n turtle.up()", "def empty_diff_walls():\n\t# 4 side walls are absorptive\n\troom_materials = [pra.Material(energy_absorption=0.1, scattering=None)] * 4\n\t# floor and ceiling are reflective\n\troom_materials.extend([pra.Material(energy_absorption=0.98, scattering=None)] * 2)\n\t\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_materials))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)\n\n\troom.add_source([-5, 2, 2.])\n\troom.add_microphone([1, 0, 2.])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.compute_rir()\n\n\treturn room", "def draw_block_element(self, cr, x, y):\n cr.rectangle(\n self.wall_width+x*self.block_size, \n (self.block_height-y-1)*self.block_size, \n self.block_size, self.block_size\n )\n \n cr.set_source_rgb(0.2, 0.25, 0.5)\n cr.fill_preserve()\n\n cr.set_source_rgb(0.8,0.8,0.8)\n cr.set_line_width(self.block_size/10)\n cr.stroke()", "def draw_house_walls(x, y, width, height):\n print('Типа рисую стены...', x, y, width, height)", "def circlePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = 
self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n \n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def vizualize_wall(self):\n\n\t\t#Points are converted from polar to cartesian here\n\t\tpoint1 = Point()\n\t\t#(-math.pi/4) represents the 45 degree rotation of the front point\n\t\t#from the front of the robot\n\t\tpoint1.x = math.cos((-math.pi/4))*self.front_point\n\t\tpoint1.y = math.sin((-math.pi/4))*self.front_point\n\t\tpoint2 = Point()\n\t\t#(-3*math.pi/4) represents the back point's 90 degree rotaion from\n\t\t#the front point\n\t\tpoint2.x = math.cos((-3*math.pi/4))*self.back_point\n\t\tpoint2.y = math.sin((-3*math.pi/4))*self.back_point\n\t\tmy_marker = Marker(type=Marker.LINE_STRIP)\n\t\tmy_marker.header.frame_id = \"base_link\"\n\t\tmy_marker.color.a = 1\n\t\tmy_marker.scale.x = .1\n\t\tmy_marker.points = [point1, point2]\n\t\tself.visualizer.publish(my_marker)", "def render_wall(win, color, direction, pos):\n x, y = pos\n\n if direction == 'S':\n width = CELL_SIZE\n height = BORDER\n x = x*CELL_SIZE\n y = (y+1)*CELL_SIZE\n\n elif direction == 'E':\n width = BORDER\n height = CELL_SIZE\n x = (x+1)*CELL_SIZE\n y = y*CELL_SIZE\n\n pygame.draw.rect(win, color, (x, y, width, height))", "def south_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, y+height / 2, s), 0)\r\n self.walls.append(swall)\r\n model = Plane(w=swall.w()*2, h=swall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, swall.x(),swall.y(),swall.z(), rx=0.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def squarePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n for edges in SQUARE[\"EDGES\"]:\n for edge in edges:\n\n point = OpenMaya.MVector(edge[0], edge[1], edge[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n \n \n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n for polygons in SQUARE[\"POLYGONS\"]:\n for polygon in polygons:\n\n point = OpenMaya.MVector(polygon[0], polygon[1], polygon[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point 
= self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def add_to_default_batch(self):\n\n '''\n self.shape = shared.batch.add(4, gl.GL_QUADS, None,\n ('v2f', (self.x, self.y,\n self.x + self.width, self.y,\n self.x + self.width, self.y + self.height,\n self.x, self.y + self.height)))\n \n numPoints = 50\n verts = []\n for i in range(numPoints):\n angle = math.radians(float(i)/numPoints * 360.0)\n x = self.radius*cos(angle) + self.x\n y = self.radius*sin(angle) + self.y\n verts += [int(x),int(y)]\n \n '''\n data = create_circle(self.x, self.y, self.radius, shared.batch)\n\n self.shape = shared.batch.add_indexed(data[0], data[1], data[2], data[3], data[4], data[5])\n\n #self.shape = shared.batch.add(numPoints, gl.GL_POLYGON, None,\n # ('v2f', verts))", "def west_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w, y+height / 2, z), 0)\r\n self.walls.append(wwall)\r\n model = Plane(w=wwall.d()*2, h=wwall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, wwall.x(),wwall.y(),wwall.z(),rx=0.0,ry=90.0,rz=0.0)\r\n\r\n wallnum += 1", "def roof(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None, makeroof=True, makeceiling=True):\r\n global wallnum\r\n\r\n roof = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, y+height+self.ceilingthickness / 2, z), 0)\r\n self.walls.append(roof)\r\n roofmodel = Plane(w=length, h=width, name=name+str(wallnum))\r\n mergeshape.add(roofmodel,x,y+height+self.ceilingthickness,z,rx=90.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def build_wall(): #py:build_wall\n RUR._build_wall_()", "def north_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n nwall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, y + height / 2, n), 0)\r\n self.walls.append(nwall)\r\n model = Plane(w=nwall.w()*2, h=nwall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, nwall.x(), nwall.y(), nwall.z())\r\n\r\n\r\n wallnum += 1", "def _draw_blocks(self):\n\t\tsurface = pygame.display.get_surface()\n\t\tcolors = {\"J\": (15, 105, 245), \"I\": (85, 235, 255), \n\t\t\t\t \"L\":(255, 170, 0), \"S\": (45, 255, 55), \"Z\": (255, 4, 0),\n\t\t\t\t \"O\": (238, 255, 0), \"T\": (245, 0, 255)}\n\t\ty = math.floor((self.window_height - (self.window_height*0.9))/2)\n\t\tx = math.floor((self.window_width - ((self.window_height*0.9)/20)*10)/2)\n\t\tincrement = math.floor((self.window_height*0.9)/20)\n\t\t# loops through board and draws to the correct spot\n\t\tfor i in range(4, 
len(self.gameboard.get_board())):\n\t\t\tfor j in range(len(self.gameboard.get_board()[i])):\n\t\t\t\tx_incremented = math.floor(x + (increment * j))\n\t\t\t\ty_incremented = math.floor(y + (increment * (i-4)))\n\t\t\t\tif self.gameboard.get_board()[i][j][0] in colors:\n\t\t\t\t\tpygame.draw.rect(surface, colors[self.gameboard.get_board()[i][j][0]],\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))\n\t\t\t\t\t\t\t\t\t# x, y, x_wid, y_len\n\t\t\t\telse:\n\t\t\t\t\tpygame.draw.rect(surface, (0,0,0),\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))" ]
[ "0.6985171", "0.6947199", "0.689782", "0.6734781", "0.6710795", "0.67082715", "0.66530377", "0.6573703", "0.6563552", "0.64633447", "0.64461184", "0.6377385", "0.63731766", "0.63545173", "0.6318463", "0.63116866", "0.6260402", "0.62379223", "0.6211161", "0.61905986", "0.61559576", "0.6143857", "0.6141515", "0.61299866", "0.61172783", "0.61165136", "0.60922706", "0.6076112", "0.60575217", "0.6042645" ]
0.7463419
0
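A usage sketch for create_wall_shape above inside an arcade draw loop, assuming it lives on a Window (or View) subclass; only ShapeElementList.draw() and arcade.start_render() are relied on from the arcade API.

# Sketch: build the combined wall geometry once, then draw it with a single call.
def setup(self):
    self.create_wall_shape()      # one ShapeElementList covering every wall block

def on_draw(self):
    arcade.start_render()
    self.shape_walls.draw()       # single draw call instead of one per wall block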
Create/Update the sprite shape for an entity and add/update the entry for it in `self.entities_shapelist`
def update_shape_sprite(self, entity: Entity):
    shape_sprite: ShapeSprite = entity.shape_sprite

    if entity.id not in self.entities_shapelist:
        entity_shapelist = arcade.ShapeElementList()
        # we need to convert from general colours to arcade specific colours
        entity_shapelist.append(arcade.create_rectangles_filled_with_colors(
            shape_sprite.point_list,
            [COLOUR_MAP[x] for x in shape_sprite.color_list])
        )
    else:
        entity_shapelist = self.entities_shapelist[entity.id]

    entity_shapelist.center_x = shape_sprite.position_x
    entity_shapelist.center_y = SCREEN_HEIGHT - shape_sprite.position_y
    entity_shapelist.draw()

    self.entities_shapelist[entity.id] = entity_shapelist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_entity(self, entity: Entity):\n \n if entity.shape_sprite:\n return self.update_shape_sprite(entity)\n \n left = (entity.x - entity.half_width)\n right = (entity.x + entity.half_width)\n # because arcade 0 on y is the bottom of the screen not the top\n bottom = abs((entity.y + entity.half_height) - SCREEN_HEIGHT)\n # bottom = entity.y - entity.half_height - SCREEN_HEIGHT\n top = abs((entity.y - entity.half_height) - SCREEN_HEIGHT)\n # top = entity.y + entity.half_height - SCREEN_HEIGHT\n \n arcade.draw_lrtb_rectangle_filled(\n left = left,\n right = right,\n bottom = bottom,\n top = top,\n color = COLOUR_MAP[entity.base_colour],\n )", "def create_sprite(self, pos):\n group = pyglet.sprite.SpriteGroup(\n self.TEXTURE, gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA\n )\n texcoords = []\n for i in xrange(self.length + 1):\n texcoords.extend([\n self.TEXTURE.tex_coords[0], i,\n self.TEXTURE.tex_coords[3], i,\n ])\n count = 2 * (self.length + 1)\n verts = [0, 0] * count # set vertices later from body\n self.vlist = batch.add(\n count, gl.GL_TRIANGLE_STRIP, group,\n ('v2f', verts),\n ('t2f', texcoords)\n )", "def shapes(self, shape_list):\n for item in shape_list:\n item.store()\n shape_list_uuids = [item.uuid for item in shape_list]\n self.set_attribute('shapes', shape_list_uuids)", "def add_entity(self, ent):\n self.tiles[ent.position[x]][ent.position[y]].add_entity(ent)", "def __init__(self, entities):\n self._shape_to_ent = dict()\n self._ent_to_shapes = dict()\n for entity in entities:\n shapes = entity.shapes\n self._ent_to_shapes[entity] = shapes\n for shape in shapes:\n assert shape not in self._shape_to_ent, \\\n f\"shape {shape} appears in {entity} and \" \\\n f\"{self._shape_to_ent[shape]}\"\n self._shape_to_ent[shape] = entity", "def shapes(self, shapes):\n\n self.container['shapes'] = shapes", "def add_shape(self, spec):\n color_, shape_ = spec\n if shape_ is None:\n shape_ = self.random_shape()\n if color_ is None:\n color_ = self.random_color()\n x = shape.rand_pos()\n y = shape.rand_pos()\n return shape.SHAPE_IMPLS[shape_](x=x, y=y, color_=color_)", "def place_entity(entity, base, x, y):\n \n img = entity.copy().convert(\"RGBA\")\n\n # Get random angle for placement\n angle = random.randint(-ROTATION_RATE, ROTATION_RATE)\n img = img.rotate(angle, expand=1)\n\n # Placement\n base.paste(img, (x, y), img)", "def update_shape_vaos(self, instance, show):\n shape = self._shape(instance)\n\n shape_object_id = id(shape)\n if not shape_object_id in self._shape_vaos:\n self._shape_vaos[shape_object_id] = VertexArray({\n 'vertex_position': VertexBuffer.from_numpy(shape.verticies),\n 'texture_coords': VertexBuffer.from_numpy(shape.texture_coords),\n }, self.program.attributes)", "def process_spawned_event(self, event):\n self.sprites[event.id] = [event.point, event.sprite]\n self.img[event.point.y, event.point.x] = self.sprite_colors[event.sprite]", "def add_to_default_batch(self):\n\n '''\n self.shape = shared.batch.add(4, gl.GL_QUADS, None,\n ('v2f', (self.x, self.y,\n self.x + self.width, self.y,\n self.x + self.width, self.y + self.height,\n self.x, self.y + self.height)))\n \n numPoints = 50\n verts = []\n for i in range(numPoints):\n angle = math.radians(float(i)/numPoints * 360.0)\n x = self.radius*cos(angle) + self.x\n y = self.radius*sin(angle) + self.y\n verts += [int(x),int(y)]\n \n '''\n data = create_circle(self.x, self.y, self.radius, shared.batch)\n\n self.shape = shared.batch.add_indexed(data[0], data[1], data[2], data[3], data[4], data[5])\n\n #self.shape = 
shared.batch.add(numPoints, gl.GL_POLYGON, None,\n # ('v2f', verts))", "def draw(self, shape):\n shape.draw(shader=self.shader)", "def _add_full_entity(self, entity):\n marked_id = utils.get_peer_id(\n utils.get_input_peer(entity, allow_self=False), add_mark=True\n )\n try:\n old_entity = self._entities[marked_id]\n old_entity.__dict__.update(entity.__dict__) # Keep old references\n\n # Update must delete old username and phone\n username = getattr(old_entity, 'username', None)\n if username:\n del self._username_id[username.lower()]\n\n phone = getattr(old_entity, 'phone', None)\n if phone:\n del self._phone_id[phone]\n except KeyError:\n # Add new entity\n self._entities[marked_id] = entity\n\n # Always update username or phone if any\n username = getattr(entity, 'username', None)\n if username:\n self._username_id[username.lower()] = marked_id\n\n phone = getattr(entity, 'phone', None)\n if phone:\n self._username_id[phone] = marked_id", "def __init__(self, size:Point, **kwargs):\n PhysicsEntity.__init__(self, **kwargs)\n self.size = size\n self.collision_shape = to_collision_rect(self.size)", "def set_shape(self, shape):\n self._shape = self._shape.merge_with(shape)", "def update_counter(cls, value):\n SFFShape.shape_id = value", "def __init__(self,canvas=None,spritePath=defaultSpritePath,hitboxRadius=0,xPos=0,yPos=0):\n global entityCounter\n global registeredEntities\n self.ID=entityCounter\n entityCounter+=1\n registeredEntities[self.ID]=self \n \n #these variables deal with position and motion\n self.xPos=xPos #the x postion of the entity\n self.yPos=yPos #the y position of the entity\n self.xMomentum=0.0\n self.yMomentum=0.0\n self.faceHeading=0.0\n \n self.hitboxRadius=hitboxRadius\n \n #these variabls and other junk deal with drawing the sprite onscreen\n self.spritePath=spritePath#the path to the image that this entity instance will use\n self.spriteImageFile=(Image.open(self.spritePath)) #the image file that we'll manipulate mainly when doing rotations\n self.spriteImage = ImageTk.PhotoImage(self.spriteImageFile.rotate(self.faceHeading,expand=True)) #the thing that tkinter uses as an image to draw on a canvas\n\n #theres two spriteImage variables because of the weird way that you basically have to reload the image if you want to rotate it. 
its a weirdity with tkinter\n self.spriteOnCanvas=None #the variable that holds a refrence to the actual drawn-on-screen thingy that's actually on the canvas\n self.canvasIGetDrawnOn=None #the canvas that this instance of the entitiy class will have its sprite drawn on\n self.canvasIGetDrawnOnsWidth=0\n self.canvasIGetDrawnOnsHeight=0\n if (canvas!=None):\n self.setCanvas(canvas)\n \n #these variables deal with motion and rotation due to player interaction\n self.isAcceleratingForward=False", "def __init__(self, shape, ssize, pos=None):\n super(Obstacle, self).__init__()\n self.pos = pos or Vec2d(0, 0)\n self.shape = shape\n # image\n self.image = pygame.Surface(ssize).convert_alpha()\n self.color = pygame.Color(\"black\")\n self.ssize = ssize\n self.rect = pygame.Rect((0, 0), self.ssize)", "def draw_item(self):\r\n self.screen.blit(self.spawned_item, self.rect)", "def Place(self, ref, scent):\n coords = self.Map.layerSolid.GetXYByRef(ref)\n self[coords] = Pheromone(scent, coords)", "def add_row(self, shape, attributes=[]):\n if isinstance(shape, shapefile.shapefile._Shape):\n self.shapes.append(shape)\n self.__shapeHolder._shapes.append(shape)\n else:\n if self.shapeType in (1, 8, 11, 21, 25, 31):\n self.__shapeHolder.point(*shape)\n elif self.shapeType in (3, 13, 23):\n addShp = self.__shapeHolder.line(shape)\n else:\n self.__shapeHolder.poly(shape)\n\n self.shapes.append(self.__shapeHolder.shapes()[-1])\n self.records.append(self.addDefaults(attributes))\n self.__isBuilt = False", "def add_sprite(self, segment, name, x, y=0.0):\n sprite = sp.Sprite(name, x, y)\n segment.sprites.append(sprite)", "def redraw_all_shapes(self):\n\n for shape_id in self.variables.shape_ids:\n pixel_coords = self.get_vector_object(shape_id).image_coords\n if pixel_coords:\n new_canvas_coords = self.shape_image_coords_to_canvas_coords(shape_id)\n self.modify_existing_shape_using_canvas_coords(shape_id, new_canvas_coords, update_pixel_coords=False)", "def register_shapes():\n turtle.Screen().register_shape(\"saphire.gif\")\n turtle.Screen().register_shape(\"player_right.gif\")\n turtle.Screen().register_shape(\"player_left.gif\")\n turtle.Screen().register_shape(\"walls.gif\")", "def add_shape(self, shape):\n\n if isinstance(shape, Shape):\n self.shapes.append(shape)\n else:\n raise TypeError", "def create_wall_shape(self):\n self.shape_walls = arcade.ShapeElementList()\n self.shape_walls.center_x = 0\n self.shape_walls.center_y = 0\n self.shape_walls.angle = 0\n\n point_list = []\n color_list = []\n \n # create the walls into a single shape\n walls = self.game.walls\n for wall in walls:\n points = self.get_entity_dimensions(wall)\n point_list.append(points[0])\n point_list.append(points[1])\n point_list.append(points[2])\n point_list.append(points[3])\n \n # as we have 4 points\n for i in range(4):\n color_list.append(COLOUR_MAP[wall.base_colour])\n \n self.shape_walls.append(\n arcade.create_rectangles_filled_with_colors(point_list, color_list)\n )", "def paintShoes(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"bodySize\"], \"shoes\", self.avatarConfiguration[\"shoes\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"shoes\")", "def assignPointsToShapes(self):\n pointsCopy = self.mission['points'].copy()\n\n while len(pointsCopy):\n shape = []\n self.recursiveAddPointToShape(pointsCopy, [pointsCopy[0]], shape)\n shape.append(shape[0])\n self.mission['shapes'].append(shape)", "def 
modify_existing_shape_using_canvas_coords(self, shape_id, new_coords, update_pixel_coords=True):\n vector_object = self.get_vector_object(shape_id)\n if vector_object.type == SHAPE_TYPES.POINT:\n point_size = vector_object.point_size\n x1, y1 = (new_coords[0] - point_size), (new_coords[1] - point_size)\n x2, y2 = (new_coords[0] + point_size), (new_coords[1] + point_size)\n canvas_drawing_coords = (x1, y1, x2, y2)\n else:\n canvas_drawing_coords = tuple(new_coords)\n self.coords(shape_id, canvas_drawing_coords)\n if update_pixel_coords:\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, new_coords)", "def SetShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_SetShape(self, *args)" ]
[ "0.61834466", "0.58497405", "0.5760513", "0.5680979", "0.55177814", "0.5469176", "0.5436238", "0.54036695", "0.5392178", "0.5378375", "0.53718156", "0.53654325", "0.53457034", "0.5342032", "0.5310451", "0.5253857", "0.5238692", "0.52258265", "0.5207183", "0.52023184", "0.5193018", "0.5178425", "0.5167262", "0.51654625", "0.5156848", "0.51563925", "0.5141927", "0.512304", "0.5112747", "0.5104402" ]
0.82262385
0
Get the pixel positions for positioning a menu in the center of the screen
def get_menu_coords(self, menu): menu_center_x = (self.width // 2) menu_center_y = (self.height // 2) # get a mapping of the menu co-ordinates for relative positioning of things inside the menu menu_cords = ( (menu_center_x - (menu.width // 2), menu_center_y + (menu.height // 2)), (menu_center_x + (menu.width // 2), menu_center_y + (menu.height // 2)), (menu_center_x - (menu.width // 2), menu_center_y - (menu.height // 2)), (menu_center_x + (menu.width // 2), menu_center_y - (menu.height // 2)), ) return menu_center_x, menu_center_y, menu_cords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMenuItemPixels(cls):\n return cls.menuItemPixels", "def getCenter(self):\n return [self.tx/self.tw, self.ty/self.tw]", "def screen_coordinates(pos):\n\n return [int((pos[0] % screen_width) / px), screen_height - int((pos[1] % screen_height) / px)]", "def get_pix_pos(self):\r\n return vec((self.grid_pos[0]*self.app.cell_width)+TOP_BOTTOM_BUFFER//2+self.app.cell_width//2,\r\n (self.grid_pos[1]*self.app.cell_height) +\r\n TOP_BOTTOM_BUFFER//2+self.app.cell_height//2)\r\n # where Pac-Man starts relative to the board\r", "def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m", "def get_center(self):\n x = round(self.x_pos)\n y = round(self.y_pos)\n return [int(x),int(y)]", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) / (self.ec._win.height / 2.)\n return np.array([x, y])", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def center(self):\n return (self.centerx, self.centery)", "def get_pos(self) -> tuple:\n return self.rect.center", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def calculate_screen_position(self):\r\n\r\n character_select_start_y = 604\r\n character_select_end_y = 646\r\n\r\n if self.slotNumber <= 6:\r\n start_y = 585 # 595\r\n end_y = 627 # 637\r\n x_hero_number = self.slotNumber\r\n else:\r\n start_y = 300 # 290\r\n end_y = 342 # 332\r\n x_hero_number = self.slotNumber - 6\r\n\r\n start_x = 249 + (x_hero_number * 192)\r\n end_x = 326 + (x_hero_number * 192)\r\n\r\n self.screenPositionCharacterSelect = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": character_select_start_y,\r\n \"end_y\": character_select_end_y\r\n }\r\n self.screenPositionTab = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": start_y,\r\n \"end_y\": end_y\r\n }", "def center(self):\n xc = (self.x.max() + self.x.min())/2.\n yc = (self.y.max() + self.y.min())/2.\n return (xc, yc)", "def get_pos_in_pixels(self):\n pixelpos = Vector(self.pos.x * 32, -self.pos.y * 32)\n return pixelpos + self.offset", "def center(self):\r\n self.centerx = self.screen_rect.centerx \r\n self.centery = self.screen_rect.centery", "def get_pos(self):\n return self.rect.midtop", "def midtop(self):\n return (self.centerx, self.top)", "def get_center_scr(self):\r\n return self.rect.center", "def from_screen_coordinates(pos):\n\n return [float(pos[0]) * px, float(screen_height - pos[1]) * px]", "def get_center_coordinates(game):\n \n return math.ceil(game.height / 2), math.ceil(game.width / 2)", "def calculate_window_position(self):\n self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2\n self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2", "def center(self):\n # minz to offset the heights to 0\n mz = (self.maxz-self.minz)/2\n #mz = self.minz\n return (self.minx + self.width / 2, self.miny + 
self.height / 2, mz)", "def get_aa_pos_on_screen(self,position,frame):\n position=position*3+float(frame)-1\n x,y=self.get_base_pos_on_screen(position)\n y=y+20.0+float(frame)*15.0\n return x,y", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]" ]
[ "0.668973", "0.6660493", "0.65916294", "0.65874016", "0.65281975", "0.6518283", "0.64817595", "0.64489305", "0.64457977", "0.64295113", "0.64295113", "0.6412496", "0.64010537", "0.6398035", "0.6398035", "0.6398035", "0.6398035", "0.6364081", "0.63268316", "0.63001156", "0.62878335", "0.6257889", "0.62543863", "0.62534195", "0.6240144", "0.6215924", "0.618805", "0.6165505", "0.61486715", "0.6146912" ]
0.74038523
0
Computes logits based on features from the model
def logits_on_features(self, h, batch): batch = batch.to(h.device) # Extract features with the model features = h.view(batch.size, -1) # Log loss logits = self.head(features) return logits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logits(self, model, batch):\n device = list(model.parameters())[0].device\n batch = batch.to(device)\n inputs = batch.inputs\n # Extract features with the model\n features = model(*inputs)\n logits = self.logits_on_features(features, batch)\n return logits", "def logits(self, features: torch.Tensor) -> torch.Tensor:\n return self.temporal_module(features)", "def get_logits(image):\n x = image\n for filters in (32, 64):\n x = tf.layers.conv2d(x, filters, 3)\n x = tf.nn.relu(x)\n x = tf.layers.max_pooling2d(x, 3, 2)\n x = tf.reduce_mean(x, axis=(1, 2))\n logits = tf.layers.dense(x, 10)\n return logits", "def compute_logits(self):\n # [num train labels, num classes] where each row is a one-hot-encoded label.\n one_hot_train_labels = tf.one_hot(self.data.train_labels, self.way)\n\n # Undocumented in the paper, but *very important*: *only* the support set\n # embeddings is L2-normalized, which means that the distance is not exactly\n # a cosine distance. For comparison we also allow for the actual cosine\n # distance to be computed, which is controlled with the\n # `exact_cosine_distance` instance attribute.\n train_embeddings = tf.nn.l2_normalize(\n self.train_embeddings, 1, epsilon=1e-3)\n test_embeddings = self.test_embeddings\n if self.exact_cosine_distance:\n test_embeddings = tf.nn.l2_normalize(test_embeddings, 1, epsilon=1e-3)\n # [num_test_images, num_train_images]\n similarities = tf.matmul(\n test_embeddings, train_embeddings, transpose_b=True)\n attention = tf.nn.softmax(similarities)\n\n # [num_test_images, way]\n probs = tf.matmul(attention, one_hot_train_labels)\n self.test_logits = tf.log(probs)\n return self.test_logits", "def forward(self, features):\n activations = {}\n for index, layer in enumerate(self.layers):\n if index == 0:\n activations[index] = layer(features)\n else:\n activations[index] = layer(activations[index - 1])\n logits = activations[len(activations) - 1]\n return logits", "def forward(self, x):\n clf_tokens_mask = (x.transpose(0, 1).contiguous().to('cpu') == self.tokenizer.vocab['[CLS]'])\n hidden_states = self.transformer(x)\n\n lm_logits = self.lm_head(hidden_states)\n clf_tokens_states = (hidden_states * clf_tokens_mask.unsqueeze(-1).float()).sum(dim=0)\n clf_logits = self.classification_head(clf_tokens_states)\n\n return lm_logits, clf_logits", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def logistic_regression_model_by_features(xTrain, yTrain, features, iter_step, resolution, initial_w0, step, max_iters):\r\n\r\n model = lgm.LogisticRegressionModel(initial_w0=initial_w0,\r\n initial_weights=[0.0] * len(features))\r\n\r\n # Extend xTrains and xTest with 1 at [0]\r\n xTrain = [[1] + x for x in xTrain]\r\n\r\n for i, iters in enumerate([iter_step] * resolution):\r\n fit_tic = time.time()\r\n model.fit(xTrain, yTrain, iterations=iters, step=step)\r\n fit_toc = time.time() - fit_tic\r\n iter_cnt = iter_step * (i + 1)\r\n print(\"Took {} sec. 
Fitted data for {} iterations\".format(fit_toc, iter_cnt))\r\n\r\n return model", "def compute_edge_logits(self):", "def __call__(self,logits):\n \n #sample from Gumbel(0, 1)\n uniform = self._srng.uniform(logits.shape,low=0,high=1)\n gumbel = -T.log(-T.log(uniform + self.eps) + self.eps)\n \n #draw a sample from the Gumbel-Softmax distribution\n return T.nnet.softmax((logits + gumbel) / self.temperature)", "def forward(self, x):\n hidden_states = self.transformer(x)\n logits = self.lm_head(hidden_states)\n\n return logits", "def _get_logits(images,\n model_options,\n outputs_to_num_classes,\n weight_decay=0.0001,\n reuse=tf.AUTO_REUSE,\n is_training=False,\n fine_tune_batch_norm=False):\n features, end_points = _extract_features(\n images,\n model_options,\n weight_decay=weight_decay,\n reuse=reuse,\n is_training=is_training,\n fine_tune_batch_norm=fine_tune_batch_norm)\n\n # TODO: CHECK\n DEBUG_VARS.aspp_result = features\n if model_options.decoder_output_stride is not None:\n decoder_height = scale_dimension(model_options.crop_size[0],\n 1.0 / model_options.decoder_output_stride)\n decoder_width = scale_dimension(model_options.crop_size[1],\n 1.0 / model_options.decoder_output_stride)\n features = refine_by_decoder(\n features,\n end_points,\n decoder_height=decoder_height,\n decoder_width=decoder_width,\n decoder_use_separable_conv=model_options.decoder_use_separable_conv,\n model_variant=model_options.model_variant,\n weight_decay=weight_decay,\n reuse=reuse,\n is_training=is_training,\n fine_tune_batch_norm=fine_tune_batch_norm)\n\n outputs_to_logits = {}\n for output in sorted(outputs_to_num_classes):\n outputs_to_logits[output] = _get_branch_logits(\n features,\n outputs_to_num_classes[output],\n model_options.atrous_rates,\n aspp_with_batch_norm=model_options.aspp_with_batch_norm,\n kernel_size=model_options.logits_kernel_size,\n weight_decay=weight_decay,\n reuse=reuse,\n scope_suffix=output)\n\n return outputs_to_logits", "def dnn_logit_fn(features, mode):\n with tf.variable_scope(\n 'input_from_feature_columns',\n values=tuple(six.itervalues(features)),\n partitioner=input_layer_partitioner):\n net = tf.feature_column.input_layer(\n features=features, feature_columns=feature_columns)\n for layer_id, num_hidden_units in enumerate(hidden_units):\n with tf.variable_scope(\n 'hiddenlayer_%d' % layer_id, values=(net,)) as hidden_layer_scope:\n net = tf.layers.dense(\n net,\n units=num_hidden_units,\n activation=activation_fn,\n kernel_initializer=tf.glorot_uniform_initializer(),\n name=hidden_layer_scope)\n if dropout is not None and mode == 'train':\n net = tf.layers.dropout(net, rate=dropout, training=True)\n # _add_hidden_layer_summary(net, hidden_layer_scope.name)\n\n with tf.variable_scope('logits', values=(net,)) as logits_scope:\n logits = tf.layers.dense(\n net,\n units=units,\n activation=None,\n kernel_initializer=tf.glorot_uniform_initializer(),\n name=logits_scope)\n # _add_hidden_layer_summary(logits, logits_scope.name)\n\n return logits", "def forward(self, reps):\n assert reps.shape[-1] == N_DIMS_PER_REP\n logits = torch.zeros(len(reps), N_UNIQUE_FEATS)\n logits[:, feat] = 1\n return logits", "def logits(self):\n return np.array([m['actor'] for m in self.model_outs], dtype=np.float32)", "def forward(self, inputs=None, **kwds):\n\n h = inputs\n h = self.feat_drop(h)\n\n for l in range(self.num_layers-1):\n\n h = self.layers[l](h)\n h = self.activation(h)\n \n logits = self.layers[-1](h)\n\n return logits", "def infer_ensemble_logits(features, model, checkpoints, 
session, num_steps,\n data):\n _, inferred = model.multi_gpu([features], 1)\n logits = []\n saver = tf.train.Saver()\n for checkpoint in checkpoints:\n saver.restore(session, checkpoint)\n for i in range(num_steps):\n logits.append(\n session.run(\n inferred[0].logits,\n feed_dict={\n features['recons_label']: data[i]['recons_label'],\n features['labels']: data[i]['labels'],\n features['images']: data[i]['images'],\n features['recons_image']: data[i]['recons_image']\n }))\n return logits", "def get_logCRF(train, model):\n word = train[0]\n Y = train[1]\n char_count, _ = word.shape\n # calculating forward messages\n alpha = np.zeros((char_count, model.dimY))\n first_term = np.dot(word, model.getW(model.labels))\n second_term = model._T\n for i in range(1, char_count):\n sum_term = (first_term[i-1] + alpha[i-1]) + second_term\n alpha[i] = np.apply_along_axis(logsumexp_trick, 1, sum_term) \n # getting logZ from messages\n logZ = logsumexp_trick(first_term[char_count-1]+alpha[char_count-1])\n w_term = np.sum(model.getW(Y).transpose() * word) # $\\sum_{j=1}^m {W_{yj} . x_j}$\n t_term = np.sum(model.getT(Y[:-1], Y[1:])) #$T_{yj, yj+1}\n value = -logZ + w_term + t_term\n return value", "def train(self, documents):\n prior_log_prob, label_to_col = self.get_prior_log_probabilities(documents)\n self.my_model[\"vocabulary\"] = make_vocabulary(documents)\n\n # find frequencies of features\n num_classes = len(label_to_col)\n num_features = len(self.extract_f_vector(documents[0]))\n features_freq = np.zeros((num_features, num_classes))\n for doc in documents:\n f_vector = self.extract_f_vector(doc)\n col_for_f_vector = label_to_col[doc.label]\n features_freq[:, col_for_f_vector] += f_vector\n\n # laplace smoothing\n total_per_label = np.sum(features_freq, axis=0)\n features_freq += np.ones(total_per_label.shape, int)\n normalizer = total_per_label + np.full(total_per_label.shape, num_features, int)\n features_freq /= normalizer\n\n # stack all probabilities to one matrix and take log\n # result: self.all_log_prob\n # |-----------------------------------|\n # | log P(f1|C1) | ... | log P(f1|Cn) |\n # | log P(f2|C1) | ... | log P(f2|Cn) |\n # | . | . | . |\n # | . | . | . |\n # | . | . | . |\n # | log P(fm|C1) | ... | log P(fm|Cn) |\n # | log P(C1) | ... 
| log P(Cn) |\n # |-----------------------------------|\n likelihood_log_prob = np.log(features_freq)\n all_log_prob = np.vstack((likelihood_log_prob, prior_log_prob))\n self.my_model[\"all_log_prob\"] = all_log_prob", "def compute_logits(self):\n # [num test images, 1, embedding size].\n test_embeddings = tf.expand_dims(self.test_embeddings, 1)\n\n # [1, num_clases, embedding_size].\n prototypes = tf.expand_dims(self.prototypes, 0)\n\n # Squared euclidean distances between each test embedding / prototype pair.\n distances = tf.reduce_sum(tf.square(test_embeddings - prototypes), 2)\n self.test_logits = -distances\n return self.test_logits", "def train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)\n alpha = 1e0\n # beta = 1e-4\n for epoch in range(8):\n loss = 0.\n acc = 0\n indices = np.arange(len(train_exs))\n np.random.shuffle(indices)\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z) \\\n # + beta * np.expand_dims(lr.w, axis=0).dot(np.expand_dims(lr.w, axis=1))[0, 0]\n predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)\n acc += (predict == y)\n grad = (z - y) * feat.toarray()[0] # + 2 * beta * lr.w\n lr.w = lr.w - alpha * grad\n print(\"epoch {:d}, loss: {:f}, accuracy: {:f}\".format(epoch, loss / len(train_exs), acc / len(train_exs)))\n\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n print(\"training loss: {:f}\".format(loss / len(train_exs)))\n\n return lr", "def forward(self, x):\n\n embeds = self.dvector(x)\n logits = self.linear(embeds)\n\n return logits", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n logits = torch.softmax(x, dim=1)\n return logits", "def build_linear_model(inputs, columns, config):\r\n features = inputs['features']\r\n\r\n cols_to_vars = {}\r\n units = int(config['linear_model'].get('units', 1))\r\n combiner = config['linear_model'].get('combiner', 'sum')\r\n linear_logits = tf.feature_column.linear_model(\r\n features=features,\r\n feature_columns=columns,\r\n units=units,\r\n sparse_combiner=combiner,\r\n cols_to_vars=cols_to_vars)\r\n\r\n return linear_logits", "def logistic(weights, data, targets, hyperparameters):\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n df = np.reshape(df, ((len(df), 1)))\n\n return f, df, np.reshape(y, (len(y), 1))", "def estimate_logreg(x,y,N_its,learning_rate=1e-4,regularizer=1e-2,lazy_reg=True):\n weights = defaultdict(float)\n weight_hist = [] #keep a history of the weights after 
each iteration\n all_labels = set(y)\n \n # this block is for lazy regularization\n ratereg = learning_rate * regularizer\n def regularize(base_feats):\n for base_feat in base_feats:\n for label in all_labels:\n #print \"regularizing\",(label,base_feat),t,last_update[base_feat],(1. - ratereg) ** (t-last_update[base_feat])\n weights[(label,base_feat)] *= (1. - ratereg) ** (t-last_update[base_feat])\n last_update[base_feat] = t\n\n t = 0\n last_update = defaultdict(int)\n\n eeta = learning_rate\n\n for it in xrange(N_its):\n\n for i,(x_i,y_i) in enumerate(zip(x,y)): #keep\n t += 1\n\n # regularization\n if lazy_reg: # lazy regularization is essential for speed\n regularize(x_i) # only regularize features in this instance\n if not lazy_reg: # for testing/explanatory purposes only\n for feat,weight in weights.iteritems():\n if feat[1] is not OFFSET: # usually don't regularize offset\n weights[feat] -= ratereg * weight\n\n p_y = compute_py(x_i,weights,all_labels) #hint\n\n term2 = make_feature_vector(x_i, y_i)\n\n for key in term2.keys():\n weights[key] = weights[key] + (term2[key]*eeta)\n\n for label in all_labels:\n temp = make_feature_vector(x_i, label)\n for key in temp.keys():\n weights[key] = weights[key] - (temp[key]*eeta*p_y[label])\n\n\n print it,\n weight_hist.append(weights.copy()) \n\n # if lazy, let regularizer catch up\n if lazy_reg:\n # iterate over base features\n regularize(list(set([f[1] for f in weights.keys() if f[1] is not OFFSET])))\n\n return weights,weight_hist", "def logreg(mode, vectorizer, training_dir):\n # 1. load the training dataset\n NORMALIZE = True\n pre_load = True\n\n logging.basicConfig(level=logging.INFO)\n logging.info(\"loading training dataset\")\n if not pre_load:\n x, y_age, y_gender, y_occ, cid = \\\n load_dataset(training_dir, mode, vectorizer)\n\n x_train = x[0:TRAIN_COUNT, :]\n\n y_train_age = y_age[0:TRAIN_COUNT]\n y_train_gender = y_gender[0:TRAIN_COUNT]\n y_train_occ = y_occ[0:TRAIN_COUNT]\n\n x_test = x[TRAIN_COUNT:TRAIN_COUNT+TEST_COUNT, :]\n y_test_age = y_age[TRAIN_COUNT:]\n y_test_gender = y_gender[TRAIN_COUNT:]\n y_test_occ = y_occ[TRAIN_COUNT:]\n cid = cid[TRAIN_COUNT:]\n\n if NORMALIZE:\n x_train = normalize(x_train, axis=1, norm='l1')\n x_test = normalize(x_test, axis=1, norm='l1')\n\n data_path = 'data/loaded_data.npz'\n with open(data_path, 'wb') as f:\n pickle.dump([x_train, y_train_age, y_train_gender, y_train_occ, x_test, y_test_age, y_test_gender, y_test_occ, cid], f)\n\n else:\n data_path = 'data/loaded_data.npz'\n if os.path.isfile(data_path):\n with open(data_path, 'rb') as f:\n x_train, y_train_age, y_train_gender, y_train_occ, x_test, y_test_age, y_test_gender, y_test_occ, cid = pickle.load(f)\n # exit()\n # 2. 
train models\n y_train_age = [x if isinstance(x, int) else 0 for x in y_train_age]\n y_test_age = [x if isinstance(x, int) else 0 for x in y_test_age]\n logging.info(\"fitting model age\")\n # age_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n # age_model = SVC()\n # age_model = DecisionTreeClassifier()\n age_model = RandomForestClassifier(n_estimators=15)\n # age_model = MultinomialNB()\n age_model.fit(x_train, y_train_age)\n logging.info(\"fitting model gender\")\n # gender_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n # gender_model = SVC(verbose=True, C=10, class_weight={0: 10, 1:1})\n # gender_model = DecisionTreeClassifier()\n gender_model = RandomForestClassifier(n_estimators=15)\n # gender_model = MultinomialNB()\n gender_model.fit(x_train, y_train_gender)\n logging.info(\"fitting model acc\")\n # occ_model = LogisticRegression(multi_class='multinomial', solver=\"newton-cg\")\n # occ_model = SVC(verbose=True)\n # occ_model = DecisionTreeClassifier()\n occ_model = RandomForestClassifier(n_estimators=15)\n # occ_model = MultinomialNB()\n occ_model.fit(x_train, y_train_occ)\n\n # 3. load the test dataset\n logging.info(\"loading test dataset ...\")\n # x_test, y_test_age, y_test_gender, y_test_occ, cid = \\\n # load_dataset(test_dir, mode, vectorizer)\n\n # 4. Predict and Evaluate\n logging.info(\"predicting\")\n age_pred = age_model.predict(x_test)\n gender_pred = gender_model.predict(x_test)\n occ_pred = occ_model.predict(x_test)\n\n # gender_pred = gender_model.predict(x_train)\n # occ_pred = occ_model.predict(x_train)\n output_labels = [{\"id\": i, \"occupation\": inv_o_dict[o], \"gender\": inv_g_dict[g], \"birthyear\": int(a) }\n for i, o, g, a in zip(cid, occ_pred, gender_pred, age_pred)]\n # output_labels = [{\"id\": i, \"gender\": inv_g_dict[g], \"occupation\": inv_o_dict[o]}\n # for i, g, o in zip(cid, gender_pred, occ_pred)]\n\n if not os.path.isdir('./results'):\n os.makedirs('./results')\n\n open(\"./results/all-predictions.ndjson\", \"w\").writelines(\n [json.dumps(x) + \"\\n\" for x in output_labels]\n )\n\n pred_dict = {\"prediction\": output_labels[0:10]}\n with open('./results/pred.json', 'w') as outfile:\n json.dump(pred_dict, outfile)\n\n gt_labels = [{\"id\": i, \"occupation\": inv_o_dict[o], \"gender\": inv_g_dict[g], \"birthyear\": int(a) }\n for i, o, g, a in zip(cid, y_test_occ, y_test_gender, y_test_age)]\n gt_dict = {\"ground_truth\": gt_labels[0:10]}\n with open('./results/gt.json', 'w') as outfile:\n json.dump(gt_dict, outfile)\n\n # saving trained models\n if not os.path.isdir(\"./pretrained-models\"):\n os.makedirs(\"./pretrained-models\")\n\n pickle.dump(age_model, open(\"./pretrained-models/age-model\", 'wb'))\n pickle.dump(gender_model, open(\"./pretrained-models/gender-model\", 'wb'))\n pickle.dump(occ_model, open(\"./pretrained-models/occ-model\", 'wb'))\n\n print(\"Accuracy for age model: {:.2f}%\".format(accuracy_score(age_pred, y_test_age) * 100.0))\n\n print(\"Accuracy for gender model: {:.2f}%\".format(accuracy_score(gender_pred, y_test_gender) * 100.0))\n\n print(\"Accuracy for occupation model: {:.2f}%\".format(accuracy_score(occ_pred, y_test_occ) * 100.0))", "def forward(self, x):\n return F.log_softmax(self.proj(x), dim=-1)", "def forward(self, logits, temperature):\n flat = logits.view(logits.shape[:-2] + (-1,))\n weights = F.softmax(flat / temperature, dim=-1).view_as(logits)\n\n x = (weights.sum(-2) * torch.linspace(-1, 1, logits.shape[-1]).type_as(logits)).sum(-1)\n y = 
(weights.sum(-1) * torch.linspace(-1, 1, logits.shape[-2]).type_as(logits)).sum(-1)\n\n return torch.stack((x, y), -1), weights" ]
[ "0.7213141", "0.6972253", "0.6765194", "0.6761858", "0.67392176", "0.6513034", "0.64970493", "0.64832693", "0.6462769", "0.64080673", "0.6387591", "0.6311425", "0.6272004", "0.62515974", "0.62422395", "0.62104046", "0.62095785", "0.62022495", "0.61950845", "0.6186781", "0.61813974", "0.61476344", "0.61155623", "0.6109147", "0.6101893", "0.60928804", "0.6054881", "0.60447484", "0.60229427", "0.60227734" ]
0.71031106
1
Compute the NLL loss given features h and targets y This assumes that the features have already be computed with the model
def nll_on_features(self, h, batch, reduction="mean"): batch = batch.to(h.device) y = batch.outputs # Extract features with the model features = h.view(batch.size, -1) # Log loss logits = self.head(features) log_probs = F.log_softmax(logits, dim=-1) nll_loss = F.nll_loss(log_probs, y, reduction=reduction) return nll_loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # We are gonna store everythin in a dictionnary hidden\n hidden = {}\n hidden['h0'] = X.reshape(X.shape[0], np.prod(X.shape[1:]))\n\n for i in range(self.L):\n idx = i + 1\n # Naming of the variable\n w = self.params['W' + str(idx)]\n b = self.params['b' + str(idx)]\n h = hidden['h' + str(idx - 1)]\n\n # Computing of the forward pass.\n # Special case of the last layer (output)\n if idx == self.L:\n h, cache_h = affine_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n # For all other layers\n else:\n h, cache_h = affine_relu_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n scores = hidden['h' + str(self.L)]\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n # Computing of the loss\n data_loss, dscores = softmax_loss(scores, y)\n reg_loss = 0\n for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:\n reg_loss += 0.5 * self.reg * np.sum(w * w)\n\n loss = data_loss + reg_loss\n\n # Backward pass\n\n hidden['dh' + str(self.L)] = dscores\n for i in range(self.L)[::-1]:\n idx = i + 1\n dh = hidden['dh' + str(idx)]\n h_cache = hidden['cache_h' + str(idx)]\n if idx == self.L:\n dh, dw, db = affine_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n else:\n dh, dw, db = affine_relu_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n\n # w gradients where we add the regulariation term\n list_dw = {key[1:]: val + self.reg * self.params[key[1:]]\n for key, val in hidden.iteritems() if key[:2] == 'dW'}\n # Paramerters b\n list_db = {key[1:]: val for key, val in hidden.iteritems() if key[:2] == 'db'}\n # Parameters gamma\n list_dgamma = {key[1:]: val for key, val in hidden.iteritems() if key[:6] == 'dgamma'}\n # Paramters beta\n list_dbeta = {key[1:]: val for key, val in hidden.iteritems() if key[:5] == 'dbeta'}\n grads = {}\n grads.update(list_dw)\n grads.update(list_db)\n grads.update(list_dgamma)\n grads.update(list_dbeta)\n return loss, grads", "def lossFun(self, inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n # forward pass\n for t in range(len(inputs)):\n xs[t] = np.zeros((self._txt_reader.vocab_size,1)) # One-hot, encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(self._Wxh, xs[t]) + np.dot(self._Whh, hs[t-1]) + self._bh) # compute chidden state\n ys[t] = np.dot(self._Why, hs[t]) + self._by # logits \n ys[t] -= ys[t].max() # for numerical stability\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars\n\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(self._Wxh), np.zeros_like(self._Whh), np.zeros_like(self._Why)\n dbh, dby = np.zeros_like(self._bh), np.zeros_like(self._by)\n\n dhnext = np.zeros_like(hs[0])\n for t in reversed(range(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. 
see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(self._Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(self._Whh.T, dhraw)\n\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def lossFunc(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {} # input, hidden, output, out_prob states for each time t\n hs[-1] = np.copy(hprev)\n loss = 0\n \n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) \n xs[t][inputs[t]] = 1. # convert input to one-hot\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)\n ys[t] = np.dot(Why, hs[t]) + by\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))\n loss += -np.log(ps[t][targets[t],0])\n \n # backward pass\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n # backprop into y\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1\n # backprop into Why, hs, and by\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext\n # backprop through tanh activition\n dhraw = (1 - hs[t] * hs[t]) * dh\n # backprop into Wxh, Whh, hs, and bh\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n # clip gradient preventing exploding\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def lossFun(review, target, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n for t in range(len(review)):\n xs[t] = np.zeros((vector_len,1)) # encode in 1-of-k representation\n for j in range(32):\n xs[t][j] = review[t][j]\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n\n #Many 2 one\n last = len(review) - 1\n ys = np.dot(Why, hs[last]) + by # unnormalized log probabilities for next chars\n ps = np.exp(ys) / np.sum(np.exp(ys)) # probabilities for next chars\n loss = -np.log(ps[target,0]) # softmax (cross-entropy loss)\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n\n dy = np.subtract(ps,target) # backprop into y. 
see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[last].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n for t in reversed(range(len(review))):\n dhraw = (1 - (hs[t] * hs[t].T)) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[last]", "def loss_fn(self, targets, outputs, model):", "def _compute_loss(self, predictions, targets, **params):\n pass", "def L2(yhat, y):\n loss = np.dot((y - yhat).T,(y - yhat))\n \n return loss", "def calculate_loss(self, y, y_hat):\r\n return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=y_hat))", "def loss(self, y: np.ndarray, y_hat: np.ndarray) -> float:\n losses = -(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))\n return losses.mean() + self.reg / self.num_parameters * (\n (self.v[:, -1] ** 2).sum() + (self.w ** 2).sum()\n )", "def lossFun(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars\n ps[t] = np.exp(ys[t]-np.max(ys[t])) / np.sum(np.exp(ys[t]-np.max(ys[t]))) # probabilities for next chars\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n\n assert_array_equal(van.window_step,t)\n assert_array_equal(van.state[t-1],hs[t-1].T[0])\n assert_array_equal(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]]),hs[t].T[0])\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n assert_array_equal(vantr.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(vantr.outputnet[t].net.elements[1].W.get(),by.T[0])\n\n #\n # #Neg\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])\n # assert_array_almost_equal(van.outputnet[t].forward(hs[t].T[0]),ps[t].T[0])\n # assert_array_almost_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ps[t].T[0])\n # assert_array_almost_equal(van.forward(xs[t].T[0]),ps[t].T[0])\n #\n # Cross\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(van.outputnet[t].net.elements[1].W.get(),by.T[0])\n assert_array_equal(van.outputnet[t].forward(hs[t].T[0]),ys[t].T[0])\n assert_array_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ys[t].T[0])\n assert_array_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],van.state[t-1]])),ys[t].T[0])\n 
assert_array_equal(van.forward(xs[t].T[0]),ys[t].T[0])\n assert_array_equal(soft.forward(ys[t].T[0]),ps[t].T[0])\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n\n for t in reversed(xrange(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n\n #\n # #Neg\n # van.backward(negLog.dJdy_gradient(ps[t].T[0],to_one_hot_vect(targets[t],vocab_size)),opt)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].dW,dWhy)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].dW,dby.T[0])\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])\n #\n #Cross\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])\n assert_array_equal(van.outputnet[t].net.forward(hs[t].T[0]),ys[t].T[0])\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0])),ps[t].T[0])\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0]))-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n\n err = cross.dJdy_gradient(ys[t].T[0],to_one_hot_vect(targets[t],vocab_size))\n\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0]))-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n assert_array_equal(ps[t].T[0]-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n assert_array_equal(err,dy.T[0])\n\n van.backward(err,opt)\n\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get_dW(),dWhy)\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(van.outputnet[t].net.elements[1].W.get_dW(),dby.T[0])\n assert_array_almost_equal(van.outputnet[t].net.elements[1].W.get(),by.T[0])\n #\n\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get_dW(),dWxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get_dW(),dWhh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get_dW(),dbh.T[0])\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n assert_array_equal(van.dJdh[t],dhnext.T[0])\n\n dhnext = np.dot(Whh.T, dhraw)\n\n opt.update_model()\n trainer.learn_window(vantr,zip(to_hot_vect(inputs,vocab_size),to_hot_vect(targets,vocab_size)),crosstr,opttr)\n\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def loss(output, y):\n #Computes softmax cross entropy between logits and labels.\n xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)\n loss = tf.reduce_mean(xentropy)\n\n return loss", "def loss_fn(self, lbl, 
y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def L1(yhat, y):\n\n loss = np.sum(np.abs(y - yhat))\n \n return loss", "def _discriminator_loss(self, y, y_hat):\n\n l1 = tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.ones(tf.shape(y)),logits = y)\n l2 = tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat)\n l = tf.reduce_mean(l1+l2)\n print('_discriminator_loss shape,', tf.shape(l))\n return l", "def loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def __loss(self, h, y):\n return (-y*np.log(h)-(1-y)*np.log(1-h)).mean()", "def deep_feature_loss(self, y0, y1):\n assert (self.sess is not None) and (not self.sess._closed)\n if not self.vars_loaded:\n print((\"WARNING: `deep_feature_loss` called before loading vars\"))\n feed_dict={self.tensor_wave0: y0, self.tensor_wave1: y1}\n return self.sess.run(self.loss_deep_features, feed_dict=feed_dict)", "def loss_fn(outputs, labels, wts):\n\n # reshape labels to give a flat vector of length batch_size*seq_len\n loss_noreduce = nn.BCEWithLogitsLoss(reduce=False)\n loss = torch.mean(loss_noreduce(outputs, labels)*wts)\n\t\n # compute cross entropy loss for all tokens\n return loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def setup_loss(self, h_s, h_e):\n with vs.variable_scope(\"loss\"):\n # masked_h_s = tf.boolean_mask(h_s, self.context_mask_placeholder)\n # masked_h_e = tf.boolean_mask(h_e, self.context_mask_placeholder)\n # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(h_s, self.start_span_placeholder) +\n # tf.nn.softmax_cross_entropy_with_logits(h_e, self.end_span_placeholder))\n masked_h_s = tf.add(h_s, (1 - tf.cast(self.context_mask_placeholder, 'float')) * (-1e30))\n masked_h_e = tf.add(h_e, (1 - tf.cast(self.context_mask_placeholder, 'float')) * (-1e30))\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(masked_h_s, self.start_span_placeholder) +\n tf.nn.softmax_cross_entropy_with_logits(masked_h_e, self.end_span_placeholder))\n total_loss = loss\n return total_loss, masked_h_s, masked_h_e", "def log_loss_objective(y_true: npt.NDArray, y_pred: npt.NDArray) -> Tuple[npt.NDArray, npt.NDArray]:\n y_pred = sigmoid(y_pred)\n grad = y_pred - y_true\n hess = y_pred * (1.0 - y_pred)\n return grad, hess", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n 
############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def train(self, X, y):\n h1_input, h1_output, h2_input, h2_output, final_output = self.forwardpass_train(\n X\n )\n # calculate average loss per one data\n train_loss = self.cross_entropy_loss(y, final_output)\n dW1, db1, dW2, db2, dW3, db3 = self.backpropagation(\n X, y, h1_input, h1_output, h2_input, h2_output, final_output\n )\n self.update_weights(dW1, db1, dW2, db2, dW3, db3)\n return train_loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n 
############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def HuberLoss(x, y, theta, epsilon):\n try:\n x = np.asmatrix(x)\n y = np.asmatrix(y).reshape(-1, 1)\n theta = np.asmatrix(theta).reshape(-1, 1)\n if x.shape[0] != y.shape[0]:\n x = x.transpose()\n except Exception:\n print('There is an error with the input data,\\\n please make sure your x can be transformed into n by m matrix,\\\n your y can be transformed into n by 1 vector,\\\n your theta can be transformed into m by 1 vector')\n sys.exit(0)\n\n n = x.shape[0] # sample size\n fx = x @ theta # matrix (dot) production for estimated y\n error = np.abs(y - fx)\n\n def hl(element):\n if element <= epsilon:\n loss = 1/2 * element**2\n else:\n loss = epsilon * element - 1/2 * epsilon**2\n\n return(loss)\n\n hlvector = np.vectorize(hl)\n\n loss = 1/n * np.sum(hlvector(error))\n\n return(loss)", "def loss(y, y_pred):\n # assert_is_binary(y)\n # assert_is_stochastic(y_pred)\n is_binary(y)\n is_stochastic(y_pred)\n\n # prevent taking the log of 0\n eps = np.finfo(float).eps\n\n # each example is associated with a single class; sum the negative log\n # probability of the correct label over all samples in the batch.\n # observe that we are taking advantage of the fact that y is one-hot\n # encoded!\n cross_entropy = -np.sum(y * np.log(y_pred + eps))\n return cross_entropy", "def get_loss(self, xs, y):\n return nn.SoftmaxLoss(self.run(xs), y)" ]
[ "0.67652905", "0.6620769", "0.66202563", "0.6577398", "0.6380125", "0.6372819", "0.63684165", "0.63215613", "0.6316717", "0.6303203", "0.6277401", "0.62723595", "0.6269626", "0.62407196", "0.62390345", "0.6234942", "0.62235326", "0.6202289", "0.61824965", "0.61645675", "0.61480033", "0.61440444", "0.6140885", "0.6122215", "0.6119902", "0.61021054", "0.6100519", "0.6098364", "0.6097332", "0.60887617" ]
0.72924393
0
Build this task's classification head.
def build_head(self, n_features, device=None):
    # By default this is a linear layer
    self.head = self.create_compatible_head(n_features, device)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def head(self) -> tf.estimator.Head:\n\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('one_dimensional_regression'):\n return tf.estimator.RegressionHead()\n num_classes = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n self.raw_label_key))\n if task_type.HasField('multi_class_classification'):\n return tf.estimator.MultiClassHead(num_classes)\n if task_type.HasField('binary_classification'):\n return tf.estimator.BinaryClassHead()\n raise ValueError('Invalid task type: {}'.format(task_type))", "def head(self) -> TaskHead:\n return self._model.head", "def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n logger.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n 'and inner_dim {} (prev: {})'.format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n self.classification_heads[name] = HuggingFaceBertClassificationHead(\n self.args.embed_dim, # self.args.encoder_embed_dim,\n inner_dim or self.args.embed_dim,\n num_classes,\n self.args.pooler_activation_fn,\n self.args.pooler_dropout,\n self.args.quant_noise_pq,\n self.args.quant_noise_pq_block_size,\n )", "def build_head(self):\n stages = [f'stage{i}' for i in range(1, 7)]\n for stage in stages:\n block = getattr(self.arch, stage)\n PAF, CFM = block.keys()\n PAF = build_blocks(block[PAF], 'head')\n CFM = build_blocks(block[CFM], 'head')\n setattr(self, f\"{stage}_PAF\", PAF)\n setattr(self, f\"{stage}_CFM\", CFM)", "def set_head(self, type: Type[TaskHead], **kwargs):\n\n self._config.head = TaskHeadConfiguration(type=type, **kwargs)\n self._model.set_head(self._config.head.compile(backbone=self.backbone))", "def __init__(self, top_n: int = 5, *args, **kwargs):\n super().__init__('Real time classification visualizer',\n *args, **kwargs)\n class_input_spec = self.inputs_specs['classification_data']\n self.class_names = class_input_spec['class_names']\n self.top_n = top_n", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def make_task(layout_parameters=None):\n if not layout_parameters:\n layout_parameters = {\n 'image_url': 'http://herp.com/derp'\n }\n\n return CategorizationTaskFixture(**layout_parameters)", "def build(self, input_image, num_class):\n x = build_resnet(101)\n # add classifier\n x = Conv2D(num_class, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)", "def __init__(self):\n self.label = \"Combine NNOutput Files \"\n self.description = \"Combines PNN, FUZ, and RBN files generated from partitions of the class.dta file.\"\n self.canRunInBackground = False\n self.category = \"Neural network\"", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, 
bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, channels, num_classes):\n super(AuxiliaryHead, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n # image size = 2 x 2\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),\n nn.Conv2d(channels, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True),\n )\n self.classifier = nn.Linear(768, num_classes)", "def __subtask_classification__(self,task_id,classification_tasks,marking_tasks,raw_classifications,aggregations):\n\n\n # go through the tools which actually have the followup questions\n for tool in classification_tasks[task_id]:\n\n # now go through the individual followup questions\n # range(len()) - since individual values will be either \"single\" or \"multiple\"\n\n for followup_question_index in range(len(classification_tasks[task_id][tool])):\n global_index = str(task_id)+\"_\" +str(tool)+\"_\"+str(followup_question_index)\n\n\n followup_classification = {}\n # this is used for inserting the results back into our running aggregation - which are based\n # on shapes, not tools\n shapes_per_cluster = {}\n\n # go through each cluster and find the corresponding raw classifications\n for subject_id in aggregations:\n if subject_id == \"param\":\n continue\n\n # has anyone done this task for this subject?\n if task_id in aggregations[subject_id]:\n # find the clusters which we have determined to be of the correct type\n # only consider those users who made the correct type marking\n # what shape did this particular tool make?\n shape = marking_tasks[task_id][tool]\n for cluster_index,cluster in aggregations[subject_id][task_id][shape + \" clusters\"].items():\n if cluster_index in [\"param\",\"all_users\"]:\n continue\n\n # what is the most likely tool for this cluster?\n most_likely_tool,_ = max(cluster[\"tool_classification\"][0].items(),key = lambda x:x[1])\n if int(most_likely_tool) != int(tool):\n continue\n\n\n # polygons and 
rectangles will pass cluster membership back as indices\n # ints => we can't case tuples\n if isinstance(cluster[\"cluster members\"][0],int):\n user_identifiers = zip(cluster[\"cluster members\"],cluster[\"users\"])\n else:\n user_identifiers = zip([tuple(x) for x in cluster[\"cluster members\"]],cluster[\"users\"])\n ballots = []\n\n for user_identifiers,tool_used in zip(user_identifiers,cluster[\"tools\"]):\n # did the user use the relevant tool - doesn't matter if most people\n # used another tool\n if tool_used == tool:\n\n followup_answer = raw_classifications[global_index][subject_id][user_identifiers]\n u = user_identifiers[1]\n ballots.append((u,followup_answer))\n\n followup_classification[(subject_id,cluster_index)] = deepcopy(ballots)\n shapes_per_cluster[(subject_id,cluster_index)] = shape\n\n\n followup_results = self.__task_aggregation__(followup_classification,global_index,{})\n assert isinstance(followup_results,dict)\n\n for subject_id,cluster_index in followup_results:\n shape = shapes_per_cluster[(subject_id,cluster_index)]\n # keyword_list = [subject_id,task_id,shape+ \" clusters\",cluster_index,\"followup_questions\"]\n new_results = followup_results[(subject_id,cluster_index)]\n # if this is the first question - insert\n # otherwise append\n\n if followup_question_index == 0:\n aggregations[subject_id][task_id][shape + \" clusters\"] [cluster_index][\"followup_question\"] = {}\n\n\n aggregations[subject_id][task_id][shape + \" clusters\"] [cluster_index][\"followup_question\"][followup_question_index] = new_results.values()[0]\n\n return aggregations", "def __init__(self, layer_list_info):\n super(DynaNet, self).__init__()\n self.layer_list_info = layer_list_info\n self.task_modules = nn.ModuleDict()\n self.classification_layers = nn.ModuleDict()\n self.module_generator = ModuleFactory(layer_list_info)\n self.task_module_name_path = {}\n self.nr_levels = len(layer_list_info)\n self.task_idx = None", "def _multi_class_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if (n_classes is None) or (n_classes < 2):\n raise ValueError(\"n_classes must be > 1 for classification: %s.\" %\n n_classes)\n\n if n_classes == 2:\n if metric_class_ids:\n raise ValueError(\"metric_class_ids invalid for n_classes==2.\")\n return _BinaryLogisticHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds)\n\n return _MultiClassHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)", "def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if 
necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.BatchNorm2d(768), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def build_task_a(self, x, y, is_training, ext_wts=None):\n config = self.config\n global_step = self.global_step\n if config.backbone_class == 'resnet_backbone':\n bb_config = config.resnet_config\n else:\n assert False, 'Not supported'\n proto_config = config.protonet_config\n opt_config = config.optimizer_config\n num_classes_a = self._num_classes_a\n\n # Classification branch for task A.\n h_a = self._run_backbone(x, is_training=is_training, ext_wts=ext_wts)\n self._h_a = h_a\n h_shape = h_a.get_shape()\n h_size = 1\n for ss in h_shape[1:]:\n h_size *= int(ss)\n self._h_size = h_size\n\n if ext_wts is not None:\n w_class_a = weight_variable(\n [h_size, num_classes_a],\n init_method='numpy',\n dtype=self.dtype,\n init_param={'val': np.transpose(ext_wts['w_class_a'])},\n wd=bb_config.wd,\n name='w_class_a')\n b_class_a = weight_variable([],\n init_method='numpy',\n dtype=self.dtype,\n init_param={'val': ext_wts['b_class_a']},\n wd=0e0,\n name='b_class_a')\n else:\n w_class_a = weight_variable([h_size, num_classes_a],\n init_method='truncated_normal',\n dtype=self.dtype,\n init_param={'stddev': 0.01},\n wd=bb_config.wd,\n name='w_class_a')\n b_class_a = weight_variable([num_classes_a],\n dtype=self.dtype,\n init_method='constant',\n init_param={'val': 0.0},\n name='b_class_a')\n self._w_class_a = w_class_a\n self._b_class_a = b_class_a\n num_classes_a_dyn = tf.cast(tf.shape(b_class_a)[0], tf.int64)\n num_classes_a_dyn32 = tf.shape(b_class_a)[0]\n\n if proto_config.cosine_a:\n if proto_config.cosine_tau:\n if ext_wts is None:\n tau_init_val = 10.0\n else:\n tau_init_val = ext_wts['tau'][0]\n tau = weight_variable([],\n dtype=self.dtype,\n init_method='constant',\n init_param={'val': tau_init_val},\n name='tau')\n else:\n tau = tf.constant(1.0)\n\n w_class_a_norm = self._normalize(w_class_a, axis=0)\n h_a_norm = self._normalize(h_a, axis=1)\n dot = tf.matmul(h_a_norm, w_class_a_norm)\n if ext_wts is not None:\n dot += b_class_a\n logits_a = tau * dot\n else:\n logits_a = tf.matmul(h_a, w_class_a) + b_class_a\n\n self._prediction_a = logits_a\n self._prediction_a_all = self._prediction_a\n y_dense = tf.one_hot(y, num_classes_a)\n xent_a = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits_a, labels=y_dense)\n xent_a = tf.reduce_mean(xent_a, name='xent')\n cost_a = xent_a\n self._cost_a = cost_a\n cost_a += self._decay()\n self._prediction_a = logits_a\n return logits_a", 
"def _construct_prediction_heads(self, num_classes, num_feature_outputs,\n class_prediction_bias_init,\n unit_height_conv=False):\n prediction_heads = {}\n prediction_heads[OBJECT_CENTER] = self._make_prediction_net_list(\n num_feature_outputs,\n num_classes,\n kernel_sizes=self._center_params.center_head_kernel_sizes,\n num_filters=self._center_params.center_head_num_filters,\n bias_fill=class_prediction_bias_init,\n name='center',\n unit_height_conv=unit_height_conv)\n\n if self._od_params is not None:\n prediction_heads[BOX_SCALE] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_SIZE_CHANNELS,\n kernel_sizes=self._od_params.scale_head_kernel_sizes,\n num_filters=self._od_params.scale_head_num_filters,\n name='box_scale',\n unit_height_conv=unit_height_conv)\n prediction_heads[BOX_OFFSET] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS,\n kernel_sizes=self._od_params.offset_head_kernel_sizes,\n num_filters=self._od_params.offset_head_num_filters,\n name='box_offset',\n unit_height_conv=unit_height_conv)\n\n if self._kp_params_dict is not None:\n for task_name, kp_params in self._kp_params_dict.items():\n num_keypoints = len(kp_params.keypoint_indices)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_HEATMAP)] = self._make_prediction_net_list(\n num_feature_outputs,\n num_keypoints,\n kernel_sizes=kp_params.heatmap_head_kernel_sizes,\n num_filters=kp_params.heatmap_head_num_filters,\n bias_fill=kp_params.heatmap_bias_init,\n name='kpt_heatmap',\n unit_height_conv=unit_height_conv)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_REGRESSION)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS * num_keypoints,\n kernel_sizes=kp_params.regress_head_kernel_sizes,\n num_filters=kp_params.regress_head_num_filters,\n name='kpt_regress',\n unit_height_conv=unit_height_conv)\n\n if kp_params.per_keypoint_offset:\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS * num_keypoints,\n kernel_sizes=kp_params.offset_head_kernel_sizes,\n num_filters=kp_params.offset_head_num_filters,\n name='kpt_offset',\n unit_height_conv=unit_height_conv)\n else:\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS,\n kernel_sizes=kp_params.offset_head_kernel_sizes,\n num_filters=kp_params.offset_head_num_filters,\n name='kpt_offset',\n unit_height_conv=unit_height_conv)\n\n if kp_params.predict_depth:\n num_depth_channel = (\n num_keypoints if kp_params.per_keypoint_depth else 1)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_DEPTH)] = self._make_prediction_net_list(\n num_feature_outputs, num_depth_channel, name='kpt_depth',\n unit_height_conv=unit_height_conv)\n\n if self._mask_params is not None:\n prediction_heads[SEGMENTATION_HEATMAP] = self._make_prediction_net_list(\n num_feature_outputs,\n num_classes,\n kernel_sizes=self._mask_params.mask_head_kernel_sizes,\n num_filters=self._mask_params.mask_head_num_filters,\n bias_fill=self._mask_params.heatmap_bias_init,\n name='seg_heatmap',\n unit_height_conv=unit_height_conv)\n\n if self._densepose_params is not None:\n prediction_heads[DENSEPOSE_HEATMAP] = self._make_prediction_net_list(\n num_feature_outputs,\n self._densepose_params.num_parts,\n bias_fill=self._densepose_params.heatmap_bias_init,\n name='dense_pose_heatmap',\n unit_height_conv=unit_height_conv)\n 
prediction_heads[DENSEPOSE_REGRESSION] = self._make_prediction_net_list(\n num_feature_outputs,\n 2 * self._densepose_params.num_parts,\n name='dense_pose_regress',\n unit_height_conv=unit_height_conv)\n\n if self._track_params is not None:\n prediction_heads[TRACK_REID] = self._make_prediction_net_list(\n num_feature_outputs,\n self._track_params.reid_embed_size,\n name='track_reid',\n unit_height_conv=unit_height_conv)\n\n # Creates a classification network to train object embeddings by learning\n # a projection from embedding space to object track ID space.\n self.track_reid_classification_net = tf.keras.Sequential()\n for _ in range(self._track_params.num_fc_layers - 1):\n self.track_reid_classification_net.add(\n tf.keras.layers.Dense(self._track_params.reid_embed_size))\n self.track_reid_classification_net.add(\n tf.keras.layers.BatchNormalization())\n self.track_reid_classification_net.add(tf.keras.layers.ReLU())\n self.track_reid_classification_net.add(\n tf.keras.layers.Dense(self._track_params.num_track_ids))\n if self._temporal_offset_params is not None:\n prediction_heads[TEMPORAL_OFFSET] = self._make_prediction_net_list(\n num_feature_outputs, NUM_OFFSET_CHANNELS, name='temporal_offset',\n unit_height_conv=unit_height_conv)\n return prediction_heads", "def __init__(self, classification_path):\n # TODO: Rodar novamente o KNN com a particao crisp 'otima' para reavaliar os valores de K\n self.data = list()\n self.class_data = np.loadtxt(classification_path, dtype=int)\n self.mfeat_fac_classifier = self.build_classifier(15, 0)\n self.mfeat_fou_classifier = self.build_classifier(13, 1)\n self.mfeat_kar_classifier = self.build_classifier(13, 2)", "def _multi_label_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if n_classes < 2:\n raise ValueError(\"n_classes must be > 1 for classification.\")\n return _MultiLabelHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)", "def _build_ner_head(self, bert_out):\n use_crf = self.config[\"model\"][\"ner\"][\"use_crf\"]\n num_labels = self.config[\"model\"][\"ner\"][\"num_labels\"]\n\n # dropout\n if (self.birnn_ner is None) or (self.config[\"model\"][\"ner\"][\"rnn\"][\"dropout\"] == 0.0):\n x = self.bert_dropout(bert_out, training=self.training_ph)\n else:\n x = bert_out\n\n # birnn\n if self.birnn_ner is not None:\n sequence_mask = tf.sequence_mask(self.num_pieces_ph)\n x = self.birnn_ner(x, training=self.training_ph, mask=sequence_mask)\n\n # pieces -> tokens\n # сделано так для того, чтобы в ElmoJointModel не нужно было переопределять данный метод\n if self.first_pieces_coords_ph is not None:\n x = tf.gather_nd(x, self.first_pieces_coords_ph) # [N, num_tokens_tokens, bert_dim or cell_dim * 2]\n\n # label logits\n logits = self.dense_ner_labels(x)\n\n # label ids\n if use_crf:\n with tf.variable_scope(\"crf\", reuse=tf.AUTO_REUSE):\n transition_params = tf.get_variable(\"transition_params\", [num_labels, num_labels], dtype=tf.float32)\n pred_ids, _ = tf.contrib.crf.crf_decode(logits, transition_params, self.num_tokens_ph)\n else:\n pred_ids = tf.argmax(logits, axis=-1)\n transition_params = None\n\n return logits, pred_ids, transition_params", "def __init__(self, classification, extras=[]):\n self.model_list = []\n self._generate_model_list(classification)\n 
self.model_list.extend(extras)\n self.classification = classification", "def __init__(self, hparams):\n super(ImagenetTransferLearning, self).__init__()\n self.hparams = hparams\n self.feature_extractor = models.mobilenet_v2(pretrained=True)\n self.feature_extractor.eval()\n\n # Establish classifier\n # self.layer_1 = torch.nn.Linear(hparams[\"input_size\"], 128)\n self.layer_1 = torch.nn.Linear(1000, 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, hparams[\"targets\"])", "def build_simple_cnn_text_classifier(\n tok2vec, nr_class, exclusive_classes: bool = ..., **cfg\n):\n ...", "def multi_label_cls_head__post_process(ctx, self, pred, **kwargs):\n return pred", "def build_step(self):\n\n pass" ]
[ "0.6749117", "0.60415906", "0.60092235", "0.5853955", "0.5802875", "0.5693355", "0.5665179", "0.5637631", "0.5628371", "0.5621177", "0.56188375", "0.56188375", "0.56188375", "0.55739766", "0.5559742", "0.55255944", "0.5508331", "0.5490201", "0.5489764", "0.5484875", "0.5467933", "0.546658", "0.54648405", "0.5457147", "0.5408707", "0.54052955", "0.5382675", "0.53710043", "0.53617156", "0.5356519" ]
0.6520812
1
Test data for this task
def test_data(self): return self._test_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTestData(self):\n raise NotImplementedError", "def test_process_data(self):\n pass", "def test_data(self, data):\n print('-'*30)\n print('Starting test: {}'.format(data['name']))\n self.set_resolution(data['resolution']['width'], data['resolution']['height'])\n self.test_actions(data['actions'])\n print('Test finished')\n print('-'*30)", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def test_data(self):\n\n return self.__valid_data, self.__valid_labels", "def _load_test_data(self):\n self._save_test_data()", "def getTestResults():", "def test_data(self):\n if self._test_data is None:\n self._load_test_data()\n if self._swapped_test_data is None:\n self._swapped_test_data = {}\n for key, value in self._test_data.items():\n self._swapped_test_data[key] = value\n return self._swapped_test_data", "def get_test_data(self, topic):\n raise NotImplementedError(\"{} must override step()\".format(self.__class__.__name__))", "def setUpTestData(cls):\n cls.board = Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )", "async def populate_test_data(self):\n async with (await self._get_connection_pool()).acquire() as conn:\n await conn.execute('delete from foglamp.tasks')\n await conn.execute('delete from foglamp.schedules')\n await conn.execute('delete from foglamp.scheduled_processes')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep1', '[\"python3\", \"../scripts/sleep.py\", \"1\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep10', '[\"python3\", \"../scripts/sleep.py\", \"10\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep30', '[\"python3\", \"../scripts/sleep.py\", \"30\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep5', '[\"python3\", \"../scripts/sleep.py\", \"5\"]')''')", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def setUp(self):\n self.dataset = get_test_dataset()", "def get_test_data():\n\n # test set\n test = pd.read_csv(\"test.csv\")\n\n return test", "def tests():", "def test_alien_data(self):", "def test_batch(self):\n pass", "def test_preprocessed_data(self):\n self.assertEqual(self.tester.preprocessed_data, [1, 2])", "def get_test_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'test')", "def getTestSet(self):\r\n return self.fTestData", "def test_data_in_param(self):", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def test_data_manipulation(self):\n target_name = self.project['target']['name']\n self.api_mock.return_value.get_metadata.return_value = [\n {'_id': '0',\n 'pid': '1',\n 'created': datetime.datetime.now(),\n 'name':'universe',\n 'originalName': 'credit-sample-200.csv',\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '1',\n 'pid': '1',\n 'name':'test',\n 'originalName': 'credit-sample-200.csv',\n 'created': datetime.datetime.now(),\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': 
[[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '2',\n 'pid': '1',\n 'name':'new',\n 'created': datetime.datetime.now(),\n 'originalName': 'credit-sample-200.csv',\n 'newdata':True,\n 'controls':{},\n 'shape': [2, 100],\n 'varTypeString': 'NN',\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}}]\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1',\n 'command': 'fit', 'max_reps': 0,\n 'samplepct': 100})\n\n #target\n #this will map the target values to (0,1) because target type is Binary\n target_vector = self.dataprocessor.target_vector()\n target_series = target_vector['main']\n self.assertItemsEqual(np.unique(target_series), [0,1])\n\n #this will be none because 'holdout_pct' isn't set in the project data\n self.assertIsNone(target_vector['holdout'])\n\n #prediction dataset\n predictors = self.dataprocessor.predictors()\n pred_dataframe = predictors['1']['main']\n self.assertItemsEqual(list(pred_dataframe.columns), [\"age\"])\n self.assertEqual(self.dataprocessor.get_vartypestring_without_target('1'), \"N\")\n\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1', 'scoring_dataset_id': '2', 'command': 'predict', 'max_reps': 0, 'samplepct':100})\n dp2 = DataProcessor(request)\n data = dp2.request_datasets()\n self.assertEqual(data.keys(), ['1'])\n self.assertEqual(data['1'].keys(), ['scoring', 'vartypes'])\n scoring_data = data['1']['scoring']\n vartypes = data['1']['vartypes']\n self.assertEqual(list(scoring_data.columns), [\"age\"])\n self.assertEqual(vartypes, \"N\")", "def test_get_run(self):\n pass", "def runtest(self):", "def test_data():\n current_dir = os.path.dirname(os.path.abspath(__file__))\n test_data_dir = os.path.join(current_dir, \"test_data\")\n\n return pd.read_csv(os.path.join(test_data_dir, \"test_data_6m.csv\"))", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'" ]
[ "0.77121603", "0.74460846", "0.73213947", "0.7164332", "0.7164332", "0.7164332", "0.7164332", "0.714354", "0.7103978", "0.69678515", "0.68217576", "0.68121225", "0.6750978", "0.67116654", "0.67001045", "0.66699296", "0.6623456", "0.66211283", "0.6617969", "0.6593253", "0.6550207", "0.65255433", "0.6525252", "0.65185773", "0.6505959", "0.65020216", "0.6501809", "0.6496141", "0.6485021", "0.6480122" ]
0.75773656
1
Dataloader type for this task
def dataloader(self): return DataLoader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_dataloader(self):\n shuffle = True if self.mode == \"train\" else False\n return DataLoader(self.get_dataset(), batch_size=self.batch_size, shuffle = shuffle, \n collate_fn=create_mini_batch)", "def get_dataloader(self, cid, batch_size=None, type=\"train\"):\n dataset = self.get_dataset(cid, type)\n batch_size = len(dataset) if batch_size is None else batch_size\n data_loader = DataLoader(dataset, batch_size=batch_size)\n return data_loader", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def get_data_loader(\n name: str, **kwargs) -> data_loader.DataLoader:\n\n if name == \"synthetic_images\":\n logging.info(\"Creating synthetic image data loader.\")\n return synthetic_image.SyntheticImageDataLoader(**kwargs)\n elif name == \"synthetic_bert\":\n logging.info(\"Creating synthetic bert data loader.\")\n return synthetic_bert.SyntheticBertLoader(**kwargs)\n elif name == \"squad_bert\":\n logging.info(\"Creating SQuAD 1.1 bert data loader.\")\n return squad_bert.SquadBertLoader(**kwargs)\n elif name == \"sentiment_bert\":\n logging.info(\"Creating IMDB sentiment analysis data loader.\")\n return generic_jsonl.GenericJsonlLoader(**kwargs)\n elif name == \"criteo\":\n logging.info(\"Creating Criteo data loader.\")\n return criteo.CriteoLoader(**kwargs)\n elif name == \"generic_jsonl\":\n logging.info(\"Creating generic jsonl file data loader.\")\n return generic_jsonl.GenericJsonlLoader(**kwargs)\n else:\n raise ValueError(\"Unsupported data loader type.\")", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def val_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_valid, **self.dl_kwargs)", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def task_type(self):\n pass", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def get_train_dataloader(self):\n if self.train_dataloader is not None:\n return self.train_dataloader\n\n assert self.schema is not None, \"schema is required to generate Train Dataloader\"\n return T4RecDataLoader.parse(self.args.data_loader_engine).from_schema(\n self.schema,\n self.train_dataset_or_path,\n self.args.per_device_train_batch_size,\n max_sequence_length=self.args.max_sequence_length,\n drop_last=self.args.dataloader_drop_last,\n shuffle=True,\n shuffle_buffer_size=self.args.shuffle_buffer_size,\n )", "def __get_dataset_type(dataset):\n op_type = None\n if isinstance(dataset, 
de.ShuffleDataset):\n op_type = OpName.SHUFFLE\n elif isinstance(dataset, de.MindDataset):\n op_type = OpName.MINDRECORD\n elif isinstance(dataset, de.BatchDataset):\n op_type = OpName.BATCH\n elif isinstance(dataset, de.SyncWaitDataset):\n op_type = OpName.BARRIER\n elif isinstance(dataset, de.ZipDataset):\n op_type = OpName.ZIP\n elif isinstance(dataset, de.ConcatDataset):\n op_type = OpName.CONCAT\n elif isinstance(dataset, de.MapDataset):\n op_type = OpName.MAP\n elif isinstance(dataset, de.FilterDataset):\n op_type = OpName.FILTER\n elif isinstance(dataset, de.RepeatDataset):\n op_type = OpName.REPEAT\n elif isinstance(dataset, de.SkipDataset):\n op_type = OpName.SKIP\n elif isinstance(dataset, de.TakeDataset):\n op_type = OpName.TAKE\n elif isinstance(dataset, de.ImageFolderDatasetV2):\n op_type = OpName.IMAGEFOLDER\n elif isinstance(dataset, de.GeneratorDataset):\n op_type = OpName.GENERATOR\n elif isinstance(dataset, de.TransferDataset):\n op_type = OpName.DEVICEQUEUE\n elif isinstance(dataset, de.RenameDataset):\n op_type = OpName.RENAME\n elif isinstance(dataset, de.TFRecordDataset):\n op_type = OpName.TFREADER\n elif isinstance(dataset, de.ProjectDataset):\n op_type = OpName.PROJECT\n elif isinstance(dataset, de.MnistDataset):\n op_type = OpName.MNIST\n elif isinstance(dataset, de.ManifestDataset):\n op_type = OpName.MANIFEST\n elif isinstance(dataset, de.VOCDataset):\n op_type = OpName.VOC\n elif isinstance(dataset, de.Cifar10Dataset):\n op_type = OpName.CIFAR10\n elif isinstance(dataset, de.Cifar100Dataset):\n op_type = OpName.CIFAR100\n elif isinstance(dataset, de.CelebADataset):\n op_type = OpName.CELEBA\n elif isinstance(dataset, de.RandomDataset):\n op_type = OpName.RANDOMDATA\n elif isinstance(dataset, de.TextFileDataset):\n op_type = OpName.TEXTFILE\n else:\n raise ValueError(\"Unsupported DatasetOp\")\n\n return op_type", "def get_dataloader(params, format_name='hdf5', **kwargs):\n \n Provider = get_proper_provider(format_name)(params.modality)\n \n return DataLoader(Provider(params.dataset_path,\n seq_length=params.seq_length),\n batch_size=params.batch_size,\n shuffle=params.is_training,\n num_workers=params.num_workers,\n pin_memory=params.cuda,\n collate_fn=pad_collate)", "def getDataSetType(self):\n return self.__data_set_type__", "def _get_dataloader(samples, batch_size):\n print(\"Cogiendo dataloader\")\n return DataLoader(samples, shuffle=True, batch_size=batch_size)", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, batch_size=1)\n return dataloaders", "def data_loader(self, url, type_of):\n\n data_loader = None\n if type_of == \"csv\":\n data_loader = self.csv\n elif type_of == \"json\":\n data_loader = self.json\n elif type_of == \"parquet\":\n data_loader = self.parquet\n elif type_of == \"avro\":\n data_loader = self.avro\n else:\n RaiseIt.type_error(data_loader, [\"csv\", \"json\", \"parquet\", \"avro\", ])\n\n i = url.rfind('/')\n data_name = url[(i + 1):]\n data_def = {\n \"displayName\": data_name,\n \"url\": url\n }\n return Downloader(data_def).download(data_loader, type_of)", "def get_dataloader(hp: HParams) \\\n -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, int]:\n if hp.data.dataset == \"podcast\":\n dataset = podcast.PODCAST(root=hp.data.path,\n audio_folder=hp.data.audio_folder,\n text_file=hp.data.text_file)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n\n # 
https://towardsdatascience.com/7-tips-for-squeezing-maximum-performance-from-pytorch-ca4a40951259\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n elif hp.data.dataset == \"librispeech\":\n Path(hp.data.path).mkdir(parents=True, exist_ok=True)\n dataset = librispeech.download_data(root=hp.data.path, url=hp.data.url)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n elif hp.data.dataset == \"ljspeech\":\n Path(hp.data.path).mkdir(parents=True, exist_ok=True)\n dataset = ljspeech.download_data(root=hp.data.path)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n else:\n raise Exception(f\"Dataset {hp.data.dataset} does not exist\")", "def get_data_loaders(opt):\n return find_dataloader_using_name(opt.dataloader)(opt).load_data()", "def task_type(cls):\r\n raise NotImplementedError()", "def get_test_dataset_DataLoader(self):\n test_info = self.get_test_DataLoader_info()\n name = test_info[\"name\"]\n task = test_info[\"task\"]\n data_dir = test_info[\"data_dir\"]\n hdf5_file = test_info[\"hdf5_file\"]\n\n data_loader = DataLoader(name, task, data_dir, hdf5_file)\n\n return data_loader, self.dataset, self.data_fields", "def get_loader(config):\n train_transform = [T.Resize((256, 128)), T.RandomHorizontalFlip(), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n train_transform = T.Compose(train_transform)\n\n test_transform = [T.Resize((256, 128)), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n test_transform = T.Compose(test_transform)\n\n # Datasets.\n if config.source_dataset in ['duke'] and config.target_dataset in ['market']:\n source_image_dir = config.duke_image_dir\n target_image_dir = config.market_image_dir\n elif config.source_dataset in ['market'] and config.target_dataset in ['duke']:\n 
source_image_dir = config.market_image_dir\n target_image_dir = config.duke_image_dir\n else:\n assert 'Dataset not support!'\n source_set = ReidDataset(source_image_dir, train_transform)\n target_set = ReidDataset(target_image_dir, train_transform, config.expanding_cam)\n test_set = ReidDataset(source_image_dir, test_transform)\n\n # Dataloader.\n source_loader = data.DataLoader(dataset=source_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n target_loader = data.DataLoader(dataset=target_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n test_loader = data.DataLoader(dataset=test_set, batch_size=config.batch_size, num_workers=config.num_workers,\n shuffle=False, pin_memory=True, drop_last=False)\n\n return {'source_loader': source_loader, 'target_loader': target_loader, 'test_loader': test_loader}", "def type(self) -> 'Data_Type':\n return Data_Type(self._info.htype, self._info.ptype)", "def data_type():\n return DataTypeUtil.getDTypeForName(DataTypeUtil.getDtypeFromContext())", "def create_train_dataloader(configs):\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler", "def get_loader(file_path, src_word2id, trg_word2id, intent_type=None, intent2index=None, batch_size=1):\n dials = json.load(open(file_path))\n dataset_list = []\n for name in dials.keys():\n val_file = dials[name]\n # build a custom dataset\n dataset = MultiwozSingleDataset(val_file, name, src_word2id, trg_word2id, intent_type, intent2index)\n dataset_list.append(dataset)\n datasets = ConcatDataset(dataset_list)\n # data loader for custome dataset\n data_loader = DataLoader(dataset=datasets,\n batch_size=batch_size,\n shuffle=True,\n num_workers=0,\n collate_fn=collate_fn)\n return data_loader", "async def handle_ledertype(self):\n rows = self._load_csv_if_newer(Ledertype)\n return await self._create_classes_from_csv(Ledertype, rows)", "def task_type(cls):\n raise NotImplementedError()", "def create_loader(dataset: Dataset, cfg: trainer_configs.BaseDatasetConfig, batch_size: int, *,\r\n collate_fn: Optional[Callable[[List[Any]], Any]] = None) -> DataLoader:\r\n # return DataLoader(\r\n # dataset, batch_size=batch_size, num_workers=cfg.num_workers,\r\n # drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r\n return DataLoader(\r\n dataset, batch_size=batch_size, shuffle=cfg.shuffle, num_workers=cfg.num_workers,\r\n drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r", "def data_type(self) -> int:\n return self.data[\"args\"][\"dataType\"]" ]
[ "0.64198226", "0.6303011", "0.62908787", "0.6223448", "0.6176214", "0.6175871", "0.59194773", "0.59120303", "0.5911008", "0.59027666", "0.5870242", "0.584646", "0.5842133", "0.5794101", "0.57517266", "0.574646", "0.56932807", "0.5689548", "0.5668795", "0.5666431", "0.56519496", "0.5641953", "0.55997527", "0.5598908", "0.5585775", "0.5576712", "0.55744284", "0.5558059", "0.5552589", "0.55400634" ]
0.67818266
0
Concatenate two tasks' datasets
def concatenate_tasks(
    tasks,
    concat_train=True,
    concat_valid=True,
    concat_test=True,
):
    new_task = deepcopy(tasks[0])
    new_task._name = "+".join(task.name for task in tasks)
    if concat_train:
        new_task._train_data = ConcatDataset(
            [task.train_data for task in tasks])
    if concat_valid:
        new_task._valid_data = ConcatDataset(
            [task.valid_data for task in tasks])
    if concat_test:
        new_task._test_data = ConcatDataset([task.test_data for task in tasks])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concatenate_data():", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def Concat(datasets):\n\n dataset_num = len(datasets)\n dataset = datasets[0]\n for i in range(1, dataset_num):\n dataset.concatenate(datasets[i])\n return dataset", "def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)", "def concat(cls, pipe1, pipe2):\n # pylint: disable=protected-access\n if pipe1.dataset != pipe2.dataset and pipe1.dataset is not None and pipe2.dataset is not None:\n raise ValueError(\"Cannot add pipelines with different datasets\")\n\n new_p1 = cls.from_pipeline(pipe1)\n new_p2 = cls.from_pipeline(pipe2)\n new_p1._action_list += new_p2._action_list[:]\n new_p1._variables = {**pipe1._variables, **pipe2._variables}\n new_p1.dataset = pipe1.dataset or pipe2.dataset\n return new_p1", "def test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def ConcatDF(train_set, test_set):\n return pd.concat([train_set, test_set], sort=True).reset_index(drop=True)", "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def merge(datasets: Sequence[\"Dataset\"]) -> \"Dataset\":\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._append_items(dsj, copy=False)\n\n return ds", "def Zip(datasets):\n return tf.data.Dataset.zip(datasets)", "def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)", "def combine_stack_and_label(filesource_dataset_1,filesource_dataset_2,num_sample):\n\n x = filesource_dataset_1[0]\n x_utterances = len(filesource_dataset_1)\n for idx in tqdm(range(1, x_utterances)):\n x = np.hstack((x, filesource_dataset_1[idx]))\n #print(x.shape)\n y = filesource_dataset_2[0]\n y_utterances = len(filesource_dataset_2)\n for idx in tqdm(range(1, 
y_utterances)):\n y = np.hstack((y, filesource_dataset_2[idx]))\n X = np.hstack((x,y))\n Y = np.hstack((np.ones((x.shape[1])),np.zeros((y.shape[1]))))\n\n if (X.shape[1] > num_sample):\n idx = np.random.choice(X.shape[1], num_sample)\n X = X[:, idx]\n Y = Y[idx]\n return X, Y", "def merge_datasets(self, other):\r\n if isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type == self.geometry_type:\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, DataFrame):\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, Series):\r\n self['merged_datasets'] = other\r\n elif isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type != self.geometry_type:\r\n raise ValueError(\"Spatial DataFrames must have the same geometry type.\")\r\n else:\r\n raise ValueError(\"Merge datasets cannot merge types %s\" % type(other))", "def flatten(self, in_place=True):\n new_dataset = TaskData()\n\n for i, dataset in enumerate(self._datasets):\n if i != self._default_index:\n new_dataset.merge(dataset)\n\n new_dataset.merge(self.default_dataset)\n\n # point all aliases to the new, single dataset\n new_aliases = {alias: 0 for alias, _ in self._aliases.items()}\n\n # replace existing datasets or return a new MultiTaskData object\n if in_place:\n self._datasets = [new_dataset]\n self._aliases = new_aliases\n self._default_index = 0\n else:\n return MultiTaskData(dataset=new_dataset, aliases=list(new_aliases.keys()))", "def concat_two_batches(batch1, batch2):\r\n with tf.name_scope('concat_batch'):\r\n if 'y' in batch1 and isinstance(batch1['y'], tf.Tensor):\r\n return {'x': tf.concat([batch1['x'], batch2['x']], axis=0),\r\n 'y': tf.concat([batch1['y'], batch2['y']], axis=0)}\r\n else:\r\n return {'x': tf.concat([batch1['x'], batch2['x']], axis=0)}", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' 
% file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def load_data(self, task):\n params = self.params\n data = {splt: {} for splt in ['train', 'valid', 'test']}\n dpath = os.path.join(params.data_path, 'eval', task)\n\n self.n_sent = 1 if task in ['SST-2', 'CoLA'] else 2\n\n for splt in ['train', 'valid', 'test']:\n\n # load data and dictionary\n data1 = load_binarized(os.path.join(dpath, '%s.s1.pth' % splt), params)\n data2 = load_binarized(os.path.join(dpath, '%s.s2.pth' % splt), params) if self.n_sent == 2 else None\n data['dico'] = data.get('dico', data1['dico'])\n\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n if self.n_sent == 2:\n set_dico_parameters(params, data, data2['dico'])\n\n # create dataset\n if self.n_sent == 1:\n data[splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n else:\n data[splt]['x'] = ParallelDataset(\n data1['sentences'], data1['positions'],\n data2['sentences'], data2['positions'],\n params\n )\n\n # load labels\n if splt != 'test' or task in ['MRPC']:\n # read labels from file\n with open(os.path.join(dpath, '%s.label' % splt), 'r') as f:\n lines = [l.rstrip() for l in f]\n # STS-B task\n if task == 'STS-B':\n assert all(0 <= float(x) <= 5 for x in lines)\n y = [float(l) for l in lines]\n # QQP\n elif task == 'QQP':\n UNK_LABEL = 0\n lab2id = {x: i for i, x in enumerate(sorted(set(lines) - set([''])))}\n y = [lab2id.get(x, UNK_LABEL) for x in lines]\n # other tasks\n else:\n lab2id = {x: i for i, x in enumerate(sorted(set(lines)))}\n y = [lab2id[x] for x in lines]\n data[splt]['y'] = torch.LongTensor(y)\n assert len(data[splt]['x']) == len(data[splt]['y'])\n\n # compute weights for weighted training\n if task != 'STS-B' and params.weighted_training:\n weights = torch.FloatTensor([\n 1.0 / (data['train']['y'] == i).sum().item()\n for i in range(len(lab2id))\n ]).npu()\n self.weights = weights / weights.sum()\n else:\n self.weights = None\n\n return data", "def joinDev(training, tLabels, dev, dLabels):\n\tdata = [n.concatenate([t,d]) for t,d in zip(training, dev)]\n\n\treturn data, n.concatenate([tLabels, dLabels])", "def concat(datasets: Sequence[\"Dataset\"], keep=\"last\") -> \"Dataset\":\n\n if keep != \"last\":\n raise NotImplementedError(\n \"Last values is the only available option at the moment.\"\n )\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._concat_time(dsj, copy=False)\n\n return ds", "def merge_models(model_1, model_2, task=None):\n\n def _merge_models(model_1, model_2):\n\n result_model = copy.deepcopy(model_1)\n\n if isinstance(model_1, torch.nn.Embedding):\n\n result_model = _add_embedding_layer(model_1, model_2)\n\n elif isinstance(model_1, torch.nn.Linear):\n result_model = _add_linear_layer(model_1, model_2)\n\n elif isinstance(model_1, torch.nn.LayerNorm):\n result_model = _add_double_norm_layer(model_1, model_2)\n\n elif isinstance(model_1, BertSelfAttention):\n result_model = _add_bert_self_attention_layer(model_1, model_2)\n\n for name_1, name_2 in zip(model_1._modules, model_2._modules):\n module_1 = model_1._modules[name_1]\n module_2 = model_2._modules[name_2]\n\n result_model._modules[name_1] 
= _merge_models(module_1, module_2)\n\n return result_model\n\n result_model = _merge_models(model_1, model_2)\n\n result_model._text_field_embedder._token_embedders[\"tokens\"].output_dim = 1024\n\n if task == \"QA\":\n result_model._linear_layer = _add_final_linear_layer(\n model_1._linear_layer, model_2._linear_layer\n )\n else:\n result_model._classification_layer = _add_final_linear_layer(\n model_1._classification_layer, model_2._classification_layer\n )\n\n return result_model", "def combine(self, states, tasks):\n self._assert_is_batched(states, tasks)\n return self._tf_call(self._combine, states, tasks)", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def __concatenateB0(self, source1, source2, target):\n cmd = \"mrcat {} {} {} -axis 3 -nthreads {} -quiet\".format(source1, source2, target, self.getNTreadsMrtrix())\n self.launchCommand(cmd)\n return target", "def concat(a, b):\n return torch.cat((a, b), 1)", "def combine_datasources(dset, dset_extra, valid_size=0, shuffle=True, random_seed=2019,\n maxsize=None, device='cpu'):\n if shuffle == True and random_seed:\n np.random.seed(random_seed)\n\n ## Convert both to TensorDataset\n if isinstance(dset, torch.utils.data.DataLoader):\n dataloader_args = {k:getattr(dset, k) for k in ['batch_size', 'num_workers']}\n X, Y = load_full_dataset(dset, targets=True, device=device)\n d = int(np.sqrt(X.shape[1]))\n X = X.reshape(-1, 1, d, d)\n dset = torch.utils.data.TensorDataset(X, Y)\n logger.info(f'Main data size. X: {X.shape}, Y: {Y.shape}')\n elif isinstance(dset, torch.utils.data.Dataset):\n raise NotImplemented('Error: combine_datasources cant take Datasets yet.')\n\n merged_dset = torch.utils.data.ConcatDataset([dset, dset_extra])\n train_idx, valid_idx = random_index_split(len(dset), 1-valid_size, (maxsize, None)) # No maxsize for validation\n train_idx = np.concatenate([train_idx, np.arange(len(dset_extra)) + len(dset)])\n\n if shuffle:\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n else:\n train_sampler = SubsetSampler(train_idx)\n valid_sampler = SubsetSampler(valid_idx)\n\n train_loader_ext = dataloader.DataLoader(merged_dset, sampler = train_sampler, **dataloader_args)\n valid_loader_ext = dataloader.DataLoader(merged_dset, sampler = valid_sampler, **dataloader_args)\n\n logger.info(f'Fold Sizes: {len(train_idx)}/{len(valid_idx)} (train/valid)')\n\n return train_loader_ext, valid_loader_ext", "def resplit_datasets(dataset, other_dataset, random_seed=None, split=None):\n # Prevent circular dependency\n from torchnlp.datasets import Dataset\n\n concat = dataset.rows + other_dataset.rows\n shuffle(concat, random_seed=random_seed)\n if split is None:\n return Dataset(concat[:len(dataset)]), Dataset(concat[len(dataset):])\n else:\n split = max(min(round(len(concat) * split), len(concat)), 0)\n return Dataset(concat[:split]), Dataset(concat[split:])", "def combine_stats(self, self2):\n if self.covs_ds[\"variable\"] != self2.covs_ds[\"variable\"]:\n raise ValueError(\"Variable names in the two datasets are not the same\")\n\n self.covs_ds[\"num_times\"] += self2.covs_ds[\"num_times\"]\n self.covs_ds[\"sum\"] += self2.covs_ds[\"sum\"]\n self.covs_ds[\"sumsq\"] += self2.covs_ds[\"sumsq\"]\n if 'dstn' in self.covs_ds.dims:\n if self.covs_ds.dims['dstn'] != self2.covs_ds.dims['dstn']:\n raise 
ValueError(\"Number of distances in the two datasets are not the same\")\n self.covs_ds[self.nam_sumsq_var] += self2.covs_ds[self.nam_sumsq_var]", "def add_dataset(self, task_name, dataset=None, *, aliases=None):\n self._datasets.append(dataset if dataset is not None else TaskData())\n last_index = len(self._datasets) - 1\n self._aliases[task_name] = last_index\n\n if aliases is not None:\n for alias in aliases:\n self._aliases[alias] = last_index\n\n if len(self._datasets) == 1:\n self._default_index = 0", "def join(upstream, product):\n a = pd.read_parquet(str(upstream[\"get\"]))\n b = pd.read_parquet(str(upstream[\"features\"]))\n df = a.join(b)\n df.to_parquet(str(product))" ]
[ "0.685715", "0.6508406", "0.6459176", "0.63558143", "0.63046694", "0.617627", "0.6152155", "0.5977995", "0.58850974", "0.58077186", "0.5803393", "0.57910776", "0.5675971", "0.5674531", "0.5653291", "0.5594489", "0.55606794", "0.55558187", "0.55526024", "0.5524445", "0.5524164", "0.55203766", "0.55157864", "0.55030775", "0.54902935", "0.5480768", "0.54485005", "0.543827", "0.5429617", "0.54236305" ]
0.7752744
0
The exception of ValueError when format was unsupported.
def _raise_format_error(self, name: str, format_str: str, source_format: str): raise ValueError(f"The '{ name }' should be { format_str }, rather than { source_format }")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_format(self):\n raise NotImplementedError()", "def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)", "def _unknown_format(self, format):\n\n raise errors.NotAcceptable('unknown data format: ' + format)", "def test_decode_raises_when_format_unknown(thing):\n with pytest.raises(ValueError):\n decode(thing)", "def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)", "def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def check_dataset_format(ds_format):\n if ds_format.lower() not in DATASET_FORMATS.keys():\n raise ValueError(\"dataset_format is expected to be one of %s. '%s' is not valid\" % (\n ', '.join(DATASET_FORMATS.keys()), ds_format,))", "def test_invalid_from_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_from_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def test_parseTimeInvalidFormat(self):\n self.assertRaises(ValueError, imap4.parseTime, u\"invalid\")", "def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def test_invalid_format(api):\n\twith pytest.raises(top_stories.InvalidFormatType):\n\t\tapi.get_stories(\"home\", \"xml\")", "def test_invalid_reader_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_reader_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def handle_invalid_arguments(e):\n errors = e.message\n return generic_errors(errors, code=400)", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def unsuported_format(self, msg):\n raise UnsupportedError(self.file.name+\" linker map format not supported by parser:\\n \"+ msg)", "def validate(self):\n if self.params.get(\"format\"):\n if self.params[\"format\"] not in formats:\n raise ValueError(f\"format must be one of {formats}: {self.dt}\")\n for p in self.required:\n if not self.params.get(p):\n raise ValueError(f\"{p} missing: {self.dt}\")", "def _validate_format(self, full_encrypted_value, **options):\n\n if not self.FORMAT_REGEX.match(full_encrypted_value):\n raise InvalidEncryptedValueError('Input value is not a valid '\n '[{current}] encryption value.'\n .format(current=self._get_algorithm()))", "def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )", "def 
test_parse_date_exceptions(\n test_input: typing.Any,\n expected: Exception,\n):\n with pytest.raises(expected):\n tvmaze.parsers.parse_date(test_input)", "def test_validate_with_invalid_key_format_type(self):\n key_format_type = \"invalid\"\n kwargs = {'key_format_type': key_format_type}\n\n self.assertRaisesRegex(\n TypeError, \"invalid key format type\", Digest, **kwargs)", "def testDataFormatNotSupported(self):\n\n x = tf.constant(0.0, shape=(2, 8, 6))\n data_format = \"WNC\"\n self.assertNotIn(data_format, conv.SUPPORTED_1D_DATA_FORMATS)\n\n with self.assertRaisesRegexp(ValueError, \"Invalid data_format\"):\n snt.Conv1D(output_channels=4, kernel_shape=4, data_format=data_format)(x)", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def test_invalid_date_format(self):\n date_field = 'expiry_date'\n self.batch_data['expiry_date'] = date_field\n resp = self.query_with_token(\n self.access_token, batch_info_query.format(**self.batch_data))\n self.assertIn(\n 'invalid literal',\n resp['errors'][0]['message'])", "def test_parser_raises_decode_error(self):\n with self.assertRaises(ParseError):\n self.parser.parse(\n stream=BytesIO(b'{\"value\": NaN}'),\n media_type=\"application/json\",\n parser_context={},\n )", "def _raise_value_error(is_gt, tracker, seq):\n if is_gt:\n raise TrackEvalException(\n 'GT data for sequence %s cannot be converted to the right format. Is data corrupted?' % seq)\n else:\n raise TrackEvalException(\n 'Tracking data from tracker %s, sequence %s cannot be converted to the right format. '\n 'Is data corrupted?' % (tracker, seq))", "def _validate_data_format(data_format):\n data_format_ = str(data_format).upper()\n if data_format_ in {'NHWC', 'NCHW'}:\n return data_format_\n raise ValueError(\n 'Argument data_format=\"{}\" not recognized; must be one of '\n '{{\"NHWC\", \"NCHW\"}} (case insensitive).'.format(data_format))", "def _raise_argument_validation_exception(typedef, value, detail, expected_tokens=None):\n typedef_name = typedef.get('help-name')\n if typedef_name is None:\n typedef_name = typedef.get('name')\n if typedef_name is None:\n typedef_name = typedef.get('field')\n if typedef_name is None:\n typedef_name = '<unknown-type>'\n if detail is None:\n detail = ''\n validation_error_format = typedef.get('validation-error-format',\n 'Invalid %(typedef)s: %(value)s; %(detail)s')\n validation_error = (validation_error_format %\n {'typedef': typedef_name, 'value': str(value), 'detail': detail})\n raise error.ArgumentValidationError(validation_error, expected_tokens)", "def _FormatException(exc):\n return ''.join(traceback.format_exception_only(type(exc), exc))", "def test_single_specifier_missing(self):\n template = 'missing'\n value_count = 1\n msg = 'The formatter should contain one \"{}\" specifier.'\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)" ]
[ "0.7590254", "0.72390485", "0.721036", "0.6805415", "0.6779957", "0.6535371", "0.6451091", "0.6289927", "0.620279", "0.6175287", "0.61463153", "0.6133835", "0.60960364", "0.6054621", "0.6033631", "0.60062885", "0.6004486", "0.5990601", "0.59883577", "0.5986072", "0.5967362", "0.59346", "0.5932319", "0.59104335", "0.59084284", "0.5900029", "0.5897594", "0.58964026", "0.5889684", "0.58832926" ]
0.7319483
1
The train iterator that executes a standard training flow per batch.
def _train_batch(self): # start epoch for i, (source, target) in enumerate(self.train_dataset): result = self._batch_iter(source, target, i) # yield yield result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_train_iterator(self) -> Iterable[Batch]:\n if self._train_name not in self._datasets:\n raise ValueError(\"Training data not provided.\")\n return self.get_iterator(self._train_name)", "def train_batch_iter(self, batch_size, num_epochs):\n return self.batch_iter(0, batch_size, num_epochs)", "def get_train_iterator(self) -> tf.contrib.data.Iterator:\n return self.train.make_initializable_iterator()", "def train(self, batch_training=False):\n raise NotImplementedError", "def run_train_iter(self, session, batch, summary_writer):\n # Match up our input data with the placeholders\n input_feed = {}\n input_feed[self.context_ids] = batch.context_ids\n input_feed[self.context_mask] = batch.context_mask\n input_feed[self.qn_ids] = batch.qn_ids\n input_feed[self.qn_mask] = batch.qn_mask\n input_feed[self.ans_ids] = batch.ans_ids\n input_feed[self.ans_mask] = batch.ans_mask\n input_feed[self.keep_prob] = 1.0 - self.FLAGS.dropout # apply dropout\n\n # if not use raw graph tokens\n if not self.FLAGS.use_raw_graph:\n input_feed[self.context_embedding] = batch.context_embeddings\n\n # output_feed contains the things we want to fetch.\n output_feed = [self.updates, self.summaries, self.loss, self.global_step, self.param_norm, self.gradient_norm, self.dev_loss]\n\n # Run the model\n [_, summaries, loss, global_step, param_norm, gradient_norm, dev_loss] = session.run(output_feed, input_feed)\n\n # All summaries in the graph are added to Tensorboard\n summary_writer.add_summary(summaries, global_step)\n\n return loss, global_step, param_norm, gradient_norm, dev_loss", "def train(self, batch):\n pass", "def train(self, train_iter_fct, train_steps):\n logger.info('Start training...')\n\n # step = self.optim._step + 1\n step = self.optim._step + 1\n true_batchs = []\n accum = 0\n normalization = 0\n train_iter = train_iter_fct()\n\n total_stats = Statistics()\n report_stats = Statistics()\n self._start_report_manager(start_time=total_stats.start_time)\n\n while step <= train_steps:\n\n reduce_counter = 0\n for i, batch in enumerate(train_iter):\n if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):\n\n true_batchs.append(batch)\n normalization += batch.batch_size\n accum += 1\n if accum == self.grad_accum_count:\n reduce_counter += 1\n if self.n_gpu > 1:\n normalization = sum(distributed\n .all_gather_list\n (normalization))\n\n self._gradient_accumulation(\n true_batchs, normalization, total_stats,\n report_stats)\n report_stats = self._maybe_report_training(\n step, train_steps,\n self.optim.learning_rate,\n report_stats)\n\n true_batchs = []\n accum = 0\n normalization = 0\n if step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0:\n self._save(step)\n\n step += 1\n if step > train_steps:\n break\n train_iter = train_iter_fct()\n\n return total_stats", "def train(self, num_batches: int):", "def train_epoch(self):\n for it in range(self.iter_per_epoch):\n # Get batch\n xs, _ = self.mnist.train.next_batch(100)\n _, loss, summary = self.sess.run([self.train_op, self.loss, self.summary_op],\n {self.x: xs})\n self.summary_writer.add_summary(summary, it)\n if it % 1000 == 0:\n print('Iteration {}\\t loss: {}'.format(it, loss))", "def train_step(self, iterator_map):\r\n\r\n def step_fn(inputs):\r\n losses = self.multi_task.joint_train_step(\r\n inputs,\r\n multi_task_model=self.multi_task_model,\r\n optimizer=self.optimizer,\r\n task_metrics=self.training_metrics)\r\n for key, loss in losses.items():\r\n self.training_losses[key].update_state(loss)\r\n\r\n self.strategy.run(\r\n step_fn, 
args=(tf.nest.map_structure(next, iterator_map),))\r\n self.global_step.assign_add(1)", "def train_batch(self, data, num_iteration, verbose=False):\n self.train(data, num_iteration, random_order=False, verbose=verbose)", "def _train(trainer, train_data, batcher_fn, total_batch_steps = 5, seed = 1):\n for i in range(total_batch_steps):\n torch.manual_seed(seed)\n set_seed(seed)\n data, targets = batcher_fn(train_data, i*35)\n trainer.train_step(data, targets)", "def iter_batch(self):\n\n # model initialization\n self._set_train()\n\n if not self.batch_process:\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()\n else:\n try:\n return self.batch_process.__next__()\n except StopIteration:\n # update the state if StopIteration\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1\n\n # reset the batch process\n del self.batch_process\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def iter_epoch(self):\n\n # set to train mode\n self._set_train()\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n self._batch_iter(source, target, i)\n\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def train(self, session, *args, train_data_iterator=None,\n dev_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement train() method\")", "def train_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def train(self, training_steps=10):", "def train(self):\r\n\r\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\r\n self.train_epoch(cur_epoch)\r\n self.model.global_step_assign_op.eval(session=self.sess, feed_dict={\r\n self.model.global_step_input: self.model.global_step_tensor.eval(self.sess) + 1})", "def train(self, data_iterator):\n \n if self.config['sequence_input']:\n if self.config['net_input_add_onehot']:\n input_data_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_input']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_input']))\n \n if self.config['sequence_output']:\n if 
self.config['net_target_add_onehot']:\n target_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_output']))\n \n training, loss_avg_t = self.setup_train(input_data_ph, target_ph)\n \n session = tf.Session()\n session.run(tf.global_variables_initializer())\n \n self.analyze_config()\n \n for epoch in range(self.config['epochs']):\n starttime = time.time()\n for step in range(self.config['epoch_steps']):\n input_data, target = next(data_iterator)\n tmp, loss_avg_value = session.run([training, loss_avg_t], {input_data_ph:input_data, target_ph:target})\n print(\"Epoch: {} Loss: {} Elapsed:{}s\".format(epoch, loss_avg_value, (time.time() - starttime)))", "def trainer(model,\n optimizer,\n dataset,\n count_of_epoch=5,\n batch_size=64,\n callback=None,\n progress=None):\n iterations = range(count_of_epoch)\n\n if progress is not None:\n iterations = progress(iterations)\n\n for it in iterations:\n\n batch_generator = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True)\n\n train_epoch(\n \tmodel=model,\n train_generator=batch_generator,\n optimizer=optimizer,\n callback=callback)\n\n return", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)", "def _run_one_training_iteration(self) -> Tuple[ResultDict, \"TrainIterCtx\"]:\n # In case we are training (in a thread) parallel to evaluation,\n # we may have to re-enable eager mode here (gets disabled in the\n # thread).\n if self.config.get(\"framework\") == \"tf2\" and not tf.executing_eagerly():\n tf1.enable_eager_execution()\n\n results = None\n # Create a step context ...\n with TrainIterCtx(algo=self) as train_iter_ctx:\n # .. so we can query it whether we should stop the iteration loop (e.g.\n # when we have reached `min_time_s_per_iteration`).\n while not train_iter_ctx.should_stop(results):\n # Try to train one step.\n # TODO (avnishn): Remove the execution plan API by q1 2023\n with self._timers[TRAINING_ITERATION_TIMER]:\n if self.config._disable_execution_plan_api:\n results = self.training_step()\n else:\n results = next(self.train_exec_impl)\n\n # With training step done. 
Try to bring failed workers back.\n self.restore_workers(self.workers)\n\n return results, train_iter_ctx", "def train__iter__(self):\n\n # create worker-specific random number generator\n rng = create_rng_for_worker(self.model.current_epoch)\n\n while True:\n\n # select one file at random (with probability proportional to its annotated duration)\n file, *_ = rng.choices(\n self._train,\n weights=[f[\"duration\"] for f in self._train],\n k=1,\n )\n\n # select one annotated region at random (with probability proportional to its duration)\n segment, *_ = rng.choices(\n file[\"annotated\"],\n weights=[s.duration for s in file[\"annotated\"]],\n k=1,\n )\n\n # select one chunk at random (with uniform distribution)\n start_time = rng.uniform(segment.start, segment.end - self.duration)\n chunk = Segment(start_time, start_time + self.duration)\n\n X, one_hot_y, _ = self.prepare_chunk(file, chunk, duration=self.duration)\n\n y = self.prepare_y(one_hot_y)\n\n yield {\"X\": X, \"y\": y}", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def step_train(self, max_iter):\n nzx, nzy = self.trn_graph.nonzero()\n n = len(self.trn_x_index)\n n_pos = len(nzx)\n for _ in range(max_iter):\n Y_pred, loss, grad_norm = self.train_fn(self.gX, self.hX, self.sym_g, self.sym_h,\n self.trn_graph, self.trn_x_index, self.trn_y_index)\n return Y_pred, loss, grad_norm", "def train(self):\n for i in xrange(self.num_steps):\n if c.ADVERSARIAL:\n # update discriminator\n batch = get_train_batch()\n print 'Training discriminator...'\n self.d_model.train_step(batch, self.g_model)\n\n # update generator\n batch = get_train_batch()\n print 'Training generator...'\n self.global_step = self.g_model.train_step(\n batch, discriminator=(self.d_model if c.ADVERSARIAL else None))\n\n # save the models\n if self.global_step % c.MODEL_SAVE_FREQ == 0:\n print '-' * 30\n print 'Saving models...'\n self.saver.save(self.sess,\n c.MODEL_SAVE_DIR + 'model.ckpt',\n global_step=self.global_step)\n print 'Saved models!'\n print '-' * 30\n\n # test generator model\n if self.global_step % c.TEST_FREQ == 0:\n self.test()", "def train(self, epoch=50):\n # self.history = self.model.fit(self.train_images,\n # self.train_labels,\n # epochs=epoch,\n # validation_data=(self.test_images, self.test_labels))\n datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n # prepare iterator\n it_train = datagen.flow(self.train_images, self.train_labels, batch_size=64)\n # fit model\n steps = int(self.train_images.shape[0] / 64)\n self.history = self.model.fit_generator(it_train, steps_per_epoch=steps,\n epochs=epoch,\n validation_data=(self.test_images,\n self.test_labels),\n verbose=1)\n # evaluate model\n _, acc = self.model.evaluate(self.test_images, self.test_labels, verbose=0)\n LOGGER.info('> %.3f' % (acc * 100.0))\n self.summarize_diagnostics()" ]
[ "0.7524614", "0.7506505", "0.7341301", "0.7287756", "0.7240162", "0.71176016", "0.7104605", "0.70856947", "0.70841146", "0.6950423", "0.6944791", "0.6935007", "0.6922542", "0.6916224", "0.6903519", "0.68901885", "0.6874518", "0.68721944", "0.6839896", "0.68311983", "0.68162155", "0.6801768", "0.6787648", "0.6771482", "0.67539996", "0.6732291", "0.67137545", "0.67128164", "0.67096996", "0.6708402" ]
0.79244447
0
Reset the process of training, which includes the loss meter reset, epoch reset and model's weights reset.
def reset_train(self): self.model.apply(self._reset_weights) self.epoch_loss.reset() self.epoch = 0 del self.batch_process self.batch_process = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()", "def train_loop_begin(self):\r\n for _, train_loss_metric in self.training_losses.items():\r\n train_loss_metric.reset_states()\r\n\r\n for _, metrics in self.training_metrics.items():\r\n for metric in metrics:\r\n metric.reset_states()", "def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0", "def reset(self):\n checkpoint = torch.load(\n 'model_lr_finder.pth.tar',\n map_location=self.device)\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.model.to(self.device)\n self.model.train()", "def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None", "def reset(self):\n self.loss = []\n self.funcargs = []\n self.nSteps = 0 \n self.converged = False", "def reset(self):\n self.loss = 0\n self.cnt = 0", "def reset(self):\n self.acc_loss = 0\n self.norm_term = 0", "def reset_training(self):\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n self.q_optim = Adam(self.q_net.parameters(), lr=self.lr)\n\n self.alpha_optim = Adam([self.log_alpha], lr=1e-2)", "def reset(self) -> None:\n self.best = self.mode_worse\n self.cooldown_counter = 0\n self.num_bad_epochs = 0", "def reset(self) -> None:\n self.is_run = False\n self.env_step = 0\n if self.resume_from_log:\n self.start_epoch, self.env_step, self.gradient_step = \\\n self.logger.restore_data()\n\n self.last_rew, self.last_len = 0.0, 0\n self.start_time = time.time()\n if self.train_collector is not None:\n self.train_collector.reset_stat()\n\n if self.train_collector.policy != self.policy:\n self.test_in_train = False\n elif self.test_collector is None:\n self.test_in_train = False\n\n if self.test_collector is not None:\n assert self.episode_per_test is not None\n assert not isinstance(self.test_collector, AsyncCollector) # Issue 700\n self.test_collector.reset_stat()\n test_result = test_episode(\n self.policy, self.test_collector, self.test_fn, self.start_epoch,\n self.episode_per_test, self.logger, self.env_step, self.reward_metric\n )\n self.best_epoch = self.start_epoch\n self.best_reward, self.best_reward_std = \\\n test_result[\"rew\"], test_result[\"rew_std\"]\n if self.save_best_fn:\n self.save_best_fn(self.policy)\n\n self.epoch = self.start_epoch\n self.stop_fn_flag = False\n self.iter_num = 0", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def reset_training_data(self):\n logger.info(\"resetting training data\")\n if self.shuffle:\n random.shuffle(self.tweets)\n self.batch_generator = self.get_batch()", "def reset(self):\n \n s = self\n s.step_counter = 0\n \n # TODO: initialize first layer activations here, and not everywhere else\n # self.model.initialize_local_vars()\n # self.model.initialize_global_vars()\n\n ops = []\n\n for var in self.model.trainable_vars:\n if self.needs_correction(var):\n 
A_svd = s[var].A.svd\n B2_svd = s[var].B2.svd \n ops.extend(A_svd.init_ops)\n ops.extend(B2_svd.init_ops)\n ops.append(s[var].A.cov.initializer)\n ops.append(s[var].B2.cov.initializer)\n\n # in new TensorFlow this breaks, probably because of\n # https://github.com/tensorflow/tensorflow/commit/07adc2ea910de715d31e16a019fcbcccb575e931\n # sometimes get \"need to feed\" placeholder error\n # sometimes do not get this error, but spend two minutes inside\n # _build_initializer_expr\n s.run(ops)", "def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def _reset(self):\n self.loss_history = []\n self.optim_configs = {}\n for p in self.model.params:\n d = {k: v for k, v in self.optim_config.items()}\n self.optim_configs[p] = d", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def reset(self, runs):\n\n self.answer_wrong = 0\n self.answer_right = 0\n self.train_new(runs)", "def shutdown_training(self):\n\n self._train_data_set = None\n self._test_data_set = None", "def reset(self):\n self.clean_cache_upstream()\n self.set_mode_train()\n for step_obj in self.all_upstream_steps.values():\n step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable']\n step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting']\n step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output']\n step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output']\n step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output']\n logger.info('Step {}, reset all upstream Steps to default training parameters, '\n 'including this Step'.format(self.name))\n return self", "def reset_train_results(self):\n self.train_loss_results = {}\n self.train_accuracy_results = {}\n self.train_pred_results = {}", "def _reset(self):\n self.use_gpu = torch.cuda.is_available()\n if self.use_gpu:\n self.model = self.model.cuda()\n self.hist_train_psnr = []\n self.hist_val_psnr = []\n self.hist_loss = []", "def reset(self):\n self.pred = None\n self.target = None", "def reset(self):\n self.pred = None\n self.target = None", "def _reset(self):\n self._model._reset()\n super(RDPAnalyzer, self)._reset()", "def reset(self):\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self._random_state = check_random_state(self.random_state)\n if self.base_estimators:\n self.experts = [\n self.WeightedExpert(\n cp.deepcopy(be), 1, self.labels)\n for be in self.base_estimators\n ]\n else:\n self.experts = [\n self._construct_new_expert()\n ]", "def _reset(self):\n np.random.shuffle(self.id)\n self.episode_step = 0 # Reset episode step counter at the end of every episode\n self._state = self.X_train[self.id[self.episode_step]]\n self._episode_ended = False\n\n return ts.restart(self._state)", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.target_ids = self.target_ids[inv_perm]\n self.new_epoch()", "def reset_states(self):\n self.mean_makespan_baseline.assign(0)\n self.mean_makespan_train.assign(0)\n self.step.assign(0)" ]
[ "0.80868524", "0.7918954", "0.7916748", "0.7766766", "0.7735061", "0.76176995", "0.7499007", "0.7421006", "0.7351169", "0.72698146", "0.7228589", "0.7210134", "0.71855277", "0.70681477", "0.701447", "0.6989159", "0.69768757", "0.6957746", "0.6951352", "0.6948265", "0.69307107", "0.6917882", "0.69028765", "0.69028765", "0.6899475", "0.68962693", "0.6867698", "0.68386054", "0.68189144", "0.6818599" ]
0.9016183
0
UiView of sett module
def ui_view(request): return render(request, 'sett_ui_view.html', {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ui(self):\n return ui", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def prepare_UI(self):", "def view(self):", "def show(self):", "def updateSettingsUI(self):\n\n pass", "def __init__(self):\n self.view = GuiView(self)\n return", "def init_ui(self):\n raise NotImplementedError(\"This is an abstract method.\")", "def getWidget(self):", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['hares folder'] = widgets.ExtendedLineEdit(\n label='HARES uitvoerbestanden folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_hares_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Start lezen uitvoerbestanden')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def setup_additional_ui(self):\n\n #set title\n self.setWindowTitle(self.title)\n\n #set question\n self.lbl_question.setText(self.question)\n\n #set_remember_choice\n self.set_remember_choice(self.chkbx_remember_choice.isChecked())", "def ui(self):\n return self._ui", "def view(self):\n raise NotImplementedError", "def additional_ui(self):\n return _UI_DEF", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def inicialUI(self):\r\n\r\n self.setGeometry(500, 500, 500, 500)\r\n self.setWindownTitle(\"Pesquisa\")\r\n self.displayWidgets()\r\n\r\n self.show()", "def gui(self):\n return gui", "def show(self) -> None:", "def ui(self, ui):\n\n self._ui = ui", "def set_ui(self):\n\n self.setLayout(self.horizon_layout)\n self.setWindowTitle(\"数据采集\")\n self.setWindowIcon(self.Icon)\n self.setWindowState(Qt.WindowMaximized)\n # self.resize(self._size_of_x, self._size_of_y)\n\n # //-set left\n self.horizon_left_layout1.addWidget(self.ECG)\n self.horizon_left_layout1.addWidget(self.ECGWin)\n self.horizon_left_layout2.addWidget(self.Respiration)\n self.horizon_left_layout2.addWidget(self.RespirationWin)\n self.horizon_left_layout3.addWidget(self.PulseWave)\n self.horizon_left_layout3.addWidget(self.PulseWaveWin)\n # self.horizon_left_layout4.addWidget(self.SpO2)\n # self.horizon_left_layout4.addWidget(self.SpO2Win)\n\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout1)\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout2)\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout3)\n # 
self.vertical_left_layout.addStretch(1)\n # self.vertical_left_layout.addLayout(self.horizon_left_layout4)\n # self.vertical_left_layout.addStretch(1)\n\n # //-set right\n # self.vertical_right_layout.addStretch(1)\n self.vertical_right_layout.addWidget(self.save)\n self.vertical_right_layout.addWidget(self.clear)\n self.vertical_right_layout.addWidget(self.receive)\n self.vertical_right_layout.addStretch(1)\n self.vertical_right_layout.addWidget(self.exit)\n # self.vertical_right_layout.addStretch(1)\n\n # //-set layout\n # self.horizon_layout.addStretch(0)\n self.horizon_layout.addLayout(self.vertical_left_layout)\n # self.horizon_layout.addStretch(0)\n # self.horizon_layout.addWidget(self.dataWin)\n self.horizon_layout.addLayout(self.vertical_right_layout)", "def _ui_module(self, name, module):\n raise NotImplementedError()", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def __init__(self):\r\n super().__init__()\r\n self.init_ui()", "def show(self):\n pass", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def setUp(self):\n self.ui = UI()", "def show(self):\n\n pass", "def toControls(self,widget):", "def setup(self):\n self.ui.setup_window()" ]
[ "0.7031982", "0.6457756", "0.6457756", "0.6440338", "0.64161724", "0.63023686", "0.6291685", "0.6243074", "0.61494553", "0.61151314", "0.6093119", "0.6090017", "0.6085131", "0.60317594", "0.60222656", "0.60190624", "0.60181403", "0.6016506", "0.5963193", "0.59621996", "0.5954504", "0.59279686", "0.5922433", "0.5894824", "0.5876441", "0.5863867", "0.58485997", "0.5809992", "0.5808063", "0.5794273" ]
0.7076183
0
Calls open file dialog, possible to choose only '.xlsx .xls .xlsm .xlsb'
def callDialog(self): self.pathTuple = filedialog.askopenfilenames(filetypes=[("Excel files", ".xlsx .xls .xlsm .xlsb")]) self.fileNames = [basename(path.abspath(name)) for name in self.pathTuple]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def open_file_dialog(self, title, initial_directory=None, file_types=None, multiselect=False):\n return self._impl.open_file_dialog(title, initial_directory, file_types, multiselect)", "def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def OpenFileExcel(self, *args, **kwargs):\n directory = None\n if kwargs is not None:\n for key, value in kwargs.items():\n if key == 'directory':\n directory = value\n\n\n\n with wx.FileDialog(self, \"Open report file\", directory,\n wildcard=\"excel files (*.xlsx)|*.xlsx|(*.xls)|*.xlsx|(*.csv)|*.csv\",\n style=wx.FD_OPEN) as fileDialog:\n \n if fileDialog.ShowModal() == wx.ID_CANCEL:\n return \n\n\n else:\n\n pathname = fileDialog.GetPath()\n print('the file to be opened is :'+ pathname)\n\n def openWorkbook(xlapp, xlfile):\n try:\n xlwb = xlapp.Workbooks(xlfile)\n except Exception as e:\n try:\n xlwb = xlapp.Workbooks.Open(xlfile)\n except Exception as e:\n print(e)\n xlwb = None\n return (xlwb)\n\n pathname = os.path.normcase(pathname)\n\n\n try:\n excel = win32.gencache.EnsureDispatch('Excel.Application')\n wb = openWorkbook(excel, pathname)\n #ws = wb.Worksheets('Sheet1')\n excel.Visible = True\n except Exception as e:\n print(e)\n\n finally:\n # RELEASES RESOURCES\n ws = None\n wb = None\n excel = None", "def ask_file(message=\"Select file for open.\", title=None):\n return dialog(\"ask_file\", message=message, title=title)", "def choose_file():\r\n import tkinter\r\n from tkinter import filedialog\r\n\r\n root_window = tkinter.Tk()\r\n root_window.withdraw()\r\n\r\n return filedialog.askopenfilename()", "def open_file(self: object) -> None:\n self.file = filedialog.askopenfilename(\n initialdir= os.getcwd(),title=\"Select File\",filetypes=(\n (\"Text Files\", \"*.txt\"),(\"all files\",\"*.*\")))\n\n if self.file:\n messagebox.showinfo(\"Selected file\", \"You have selected %s\"%(\n self.file))", "def file_to_open(self, title='Open file..', initial_folder=None, extension=\"All files (*.*)\", datafolder=None):\n pass", "def open_fileDialog(self):\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self, \"Открыть исходный файл\", os.path.expanduser(\"~\"),\n \"XML Файлы (*.xml);;JSON Файлы (*.json)\", options=options)\n if fileName:\n file_format = fileName.split('.')[1]\n if file_format == 'xml':\n self.data_from_xml(fileName)\n elif file_format == 'json':\n self.data_from_json(fileName)\n self.msg2Statusbar.emit('Импорт из файла {0}'.format(fileName))", "def menu_Open():\n asdf = tkFileDialog.askopenfilename()\n print(asdf)", "def request_file():\n \n from tkinter import Tk\n from tkinter.filedialog import askopenfilename\n \n # Make a top-level instance and hide from user.\n root = Tk()\n root.withdraw()\n\n # Make it almost invisible - no decorations, 0 size, top left corner.\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n\n # Show window again and lift it to top so it can get focus, otherwise dialogs will end up behind the 
terminal.\n root.deiconify()\n root.lift()\n root.focus_force()\n\n # Show an \"Open\" dialog box and return the path to the selected file\n file_path = askopenfilename(initialdir='./IR_Datasets/',\n title='Excel to Read',\n filetypes=(('New Excel', '*xlsx'), ('Old Excel', '*.xls')),\n parent=root)\n\n # Get rid of the top-level instance once to make it actually invisible.\n root.destroy()\n \n return file_path", "def input_file(self):\r\n try:\r\n f = tkFileDialog.askopenfilename(parent=self.top, initialdir=\"/home/marcin/pulpit/Py/\",\r\n title=\"Wybór pliku excel z danymi\",\r\n filetypes=[(\"Excel file\", \".xlsx\")])\r\n self.filepath_input.set(os.path.realpath(f))\r\n self.excel_input_file = os.path.realpath(f)\r\n except ValueError:\r\n tkMessageBox.showerror(\"Error\", \"Wystąpił problem z załadowaniem pliku excel z danymi.\")", "def FileOpenDialog( message, wildcard, style=0, defaultDir=os.getcwd(), defaultFile='' ):\n style = style | wx.OPEN | wx.CHANGE_DIR\n return FileDialog( message, wildcard, style, defaultDir, defaultFile )", "def msg_open(self,msg):\r\n filepaths = msg.get_data()\r\n if filepaths is ():\r\n #Create the file open dialog.\r\n filepaths,index = DoFileDialog(self.frame, wildcard = \"Python source (*.py,*.pyw)|*.py;*.pyw|All files (*,*.*)|*.*;*\")\r\n if filepaths==None:\r\n return\r\n\r\n if (filepaths is not None) and (filepaths!=[]):\r\n #open the file requested\r\n for path in filepaths:\r\n self.frame.notebook.OpenFile(path)\r\n self.frame.Show()\r\n self.frame.Raise()", "def on_open_button(self, event):\n wildcard = \"All files (*.*)|*.*|\"\\\n \"Preprocessed _iso_res.csv file (*_iso_res.csv)|*_iso_res.csv|\"\\\n \"Massacre iso_csv file (*_iso.csv)|*_iso.csv|\"\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n \n if dlg.ShowModal() == wx.ID_OK:\n fullname = dlg.GetPaths()[0].split('/')\n dpa = '/'.join(fullname[:-1]) + '/'\n self.currentDirectory = dpa\n fna = fullname[-1]\n [dfr, pul, vlab] = openFile(dpa+fna)\n startApp(dfr, dpa, fna, pul, vlab, fsize=self.fsize, size=self.size)\n\n dlg.Destroy()", "def askOpen(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN):\r\n defaultDir,defaultFile = [GPath(x).s for x in (defaultDir,defaultFile)]\r\n dialog = wx.FileDialog(parent,title,defaultDir,defaultFile,wildcard, style )\r\n if dialog.ShowModal() != wx.ID_OK: \r\n result = False\r\n elif style & wx.MULTIPLE:\r\n result = map(GPath,dialog.GetPaths())\r\n else:\r\n result = GPath(dialog.GetPath())\r\n dialog.Destroy()\r\n return result", "def open_file(self, event=None):\n file = fd.askopenfile(title=\"Choose file to open\",\n filetypes=[(\"Python(default)\", \"*.py\"), (\"Text\", \"*.txt\"),\n (\"Java\", \"*.java\"), (\"JavaScript\", \"*.js\"),\n (\"HTML\", \"*.html\"), (\"CSS\", \"*.css\"),\n (\"All files\", \"*.*\")])\n if file is None:\n return\n else:\n if imghdr.what(\n file.name): # if file is image return image type otherwise return None if file is not an image type\n from project_explorer import ProjectExplorer\n ProjectExplorer().open_image(file.name)\n else:\n self.add_tab(file=file.name, open_file=1)\n from syntax_highlight import Highlighting\n Highlighting().highlight2()", "def select_file() -> True:\n current_directory = os.getcwd()\n selected_file = eg.fileopenbox(title=EG_TITLE+': Open a file',\n default=os.path.join(current_directory, \"..\"),\n filetypes=\"*.txt,*.py\")\n print(f\"Selected file: 
{os.path.basename(selected_file)}\")\n print(f\"In directory: {os.path.dirname(selected_file)}\")\n return True", "def file_popup(file) -> str:\n layout = [\n [sg.Text(f\"Select the action to perform on\\n\\n{file}\")],\n [sg.Button(\"Open File\", key=\"-APP-\"),\n sg.Button(\"Open in File Explorer\", key=\"-EXPLORER-\"),\n sg.Button(\"Delete File\", key=\"-DEl-\",\n button_color=(\"Black\", \"OrangeRed\"))]\n ]\n window = sg.Window(\"Open selected file.\", layout, finalize=True)\n button, value = window.read()\n window.close()\n del window\n return button", "def askopenfilename():\n\n file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))", "def get_file_path():\n root = tk.Tk()\n root.withdraw()\n file_path = filedialog.askopenfilename(filetypes=[(\"Excel file\", \"*.xlsx\")])\n return file_path", "def browse(self):\n formats = [\n \"Text - comma separated (*.csv, *)\",\n \"Text - tab separated (*.tsv, *)\",\n \"Text - all files (*)\"\n ]\n\n dlg = QFileDialog(\n self, windowTitle=\"Open Data File\",\n acceptMode=QFileDialog.AcceptOpen,\n fileMode=QFileDialog.ExistingFile\n )\n dlg.setNameFilters(formats)\n state = self.dialog_state\n lastdir = state.get(\"directory\", \"\")\n lastfilter = state.get(\"filter\", \"\")\n\n if lastdir and os.path.isdir(lastdir):\n dlg.setDirectory(lastdir)\n if lastfilter:\n dlg.selectNameFilter(lastfilter)\n\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QFileDialog.Accepted:\n self.dialog_state[\"directory\"] = dlg.directory().absolutePath()\n self.dialog_state[\"filter\"] = dlg.selectedNameFilter()\n\n selected_filter = dlg.selectedNameFilter()\n path = dlg.selectedFiles()[0]\n # pre-flight check; try to determine the nature of the file\n mtype = _mime_type_for_path(path)\n if not mtype.inherits(\"text/plain\"):\n mb = QMessageBox(\n parent=self,\n windowTitle=\"\",\n icon=QMessageBox.Question,\n text=\"The '{basename}' may be a binary file.\\n\"\n \"Are you sure you want to continue?\".format(\n basename=os.path.basename(path)),\n standardButtons=QMessageBox.Cancel | QMessageBox.Yes\n )\n mb.setWindowModality(Qt.WindowModal)\n if mb.exec() == QMessageBox.Cancel:\n return\n\n # initialize dialect based on selected extension\n if selected_filter in formats[:-1]:\n filter_idx = formats.index(selected_filter)\n if filter_idx == 0:\n dialect = csv.excel()\n elif filter_idx == 1:\n dialect = csv.excel_tab()\n else:\n dialect = csv.excel_tab()\n header = True\n else:\n try:\n dialect, header = sniff_csv_with_path(path)\n except Exception:\n dialect, header = csv.excel(), True\n\n options = None\n # Search for path in history.\n # If found use the stored params to initialize the import dialog\n items = self.itemsFromSettings()\n idx = index_where(items, lambda t: samepath(t[0], path))\n if idx is not None:\n _, options_ = items[idx]\n if options_ is not None:\n options = options_\n\n if options is None:\n if not header:\n rowspec = []\n else:\n rowspec = [(range(0, 1), RowSpec.Header)]\n options = Options(\n encoding=\"utf-8\", dialect=dialect, rowspec=rowspec)\n\n dlg = CSVImportDialog(\n self, windowTitle=\"Import Options\", 
sizeGripEnabled=True)\n dlg.setWindowModality(Qt.WindowModal)\n dlg.setPath(path)\n dlg.setOptions(options)\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QDialog.Accepted:\n self.set_selected_file(path, dlg.options())", "def open_files():\n import Tkinter\n import tkFileDialog\n\n root = Tkinter.Tk()\n root.withdraw()\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n \n root.deiconify()\n root.lift()\n root.focus_force()\n \n filenames = tkFileDialog.askopenfilenames(parent=root, title = \"Open file\")\n root.destroy()\n \n return filenames[0]", "def ui_open(*files):\r\n if files:\r\n osname = os.uname()[0].lower()\r\n if not osname in _OPENER_BY_OS:\r\n print('Sorry, open currently not supported for ' + osname)\r\n else:\r\n _OPENER_BY_OS[osname](files)", "def fileBrowserDialog(*args, actionName: AnyStr=\"\", dialogStyle: int=0, fileCommand:\n Script=None, fileType: AnyStr=\"\", filterList: Union[AnyStr,\n List[AnyStr]]=\"\", includeName: AnyStr=\"\", mode: int=0, operationMode:\n AnyStr=\"\", tipMessage: AnyStr=\"\", windowTitle: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass", "def get_input_name():\n xlsTypes = [(\"Файлы Excel или csv\", \".xls .xlsx\")]\n return askopenfilenames(initialdir=os.path.abspath(os.getcwd()), filetypes=xlsTypes, title=\"Выберите файлы Excel или CSV\")", "def open_file(self):\n filepath = askopenfilename(filetypes=[(\"Image Files\", (\"*.jpg\", \"*.png\")), (\"All Files\", \"*.*\")])\n if not filepath:\n return\n return filepath", "def _open_files(view, sel):\n schema, word = get_names(view, sel)\n file_name = word + '.sql'\n path = [schema, None, file_name]\n files = find_file(view.window().folders(), path)\n if len(files) > 5:\n print('something is wrong; too many files; aborting')\n return\n for f in files:\n view.window().open_file(f)", "def on_open(self):\n\n ftypes = [('CSV', '.csv'), ('JSON', '.json'), ('All files', '*')]\n dlg = filedialog.Open(self, filetypes=ftypes)\n\n absolute_file_path = dlg.show()\n \n if absolute_file_path:\n # extract the file name from the absolute path\n file_name = absolute_file_path.split('/')[len(absolute_file_path.split('/')) - 1]\n \n # update the label text\n self.selected_file_name.configure(text=file_name)\n\n self.__set_full_path_of_file(absolute_file_path)\n else:\n # update the label text\n self.selected_file_name.configure(text=\"<Selected file name>\")\n\n self.__set_full_path_of_file(None)" ]
[ "0.75951433", "0.7013304", "0.7009427", "0.6953197", "0.69224757", "0.6823879", "0.669363", "0.6688277", "0.66736335", "0.6672926", "0.66545457", "0.66527754", "0.6618524", "0.65777797", "0.6551159", "0.65394413", "0.65350693", "0.65235656", "0.6490343", "0.6485696", "0.64783317", "0.646859", "0.6425014", "0.64250106", "0.6394995", "0.63840955", "0.6368174", "0.6353769", "0.63432294", "0.62897736" ]
0.7704559
0
Returns tuple of paths stored at class instance
def getPaths(self): return self.pathTuple
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def paths(self):\n return tuple(self._path)", "def get_paths(self):\n paths = []\n for f in dir(self):\n o = getattr(self, f)\n if callable(o) and hasattr(o, '_path'):\n paths.append(getattr(o, '_path'))\n return paths", "def get_class_paths(_class, saltclass_path):\n straight = os.path.join(saltclass_path, \"classes\", \"{}.yml\".format(_class))\n sub_straight = os.path.join(\n saltclass_path, \"classes\", \"{}.yml\".format(_class.replace(\".\", os.sep))\n )\n sub_init = os.path.join(\n saltclass_path, \"classes\", _class.replace(\".\", os.sep), \"init.yml\"\n )\n return straight, sub_init, sub_straight", "def get_paths(self):\n return self.paths", "def paths(self):\r\n return self._paths", "def paths(self) -> Paths:\n return self._paths", "def get_paths(self):\n return (self.world_fpath, self.subj_fpath, self.peds_fpath)", "def GetPaths(self):\n return self.paths", "def paths(self):\n return self._paths", "def paths(self):\n return self._paths", "def path(self) -> List[Path]:\n return self._path", "def get_path(self) :\n path = [self]\n s = self.get_parent()\n while s is not None :\n path.append(s)\n s = s.get_parent()\n path.reverse()\n return path", "def get_paths(self):\n return self.path.split(',')", "def filepaths(self):\n pass", "def path_entries(self):", "def get_path(self) -> list:\n path = []\n if self.parent:\n path = [a.name for a in self.parent.ancestors(include_self=True)]\n\n return path + [self.name]", "def paths(self):\n return list(zip(*self.collected))[0]", "def path(self):\n if bool(self._path_parameters):\n payload = {inflection.underscore(k): v for k, v, in self._path_parameters.items()}\n else:\n payload = dict()\n PathTuple = namedtuple('PathTuple', sorted(payload))\n the_tuple = PathTuple(**payload)\n return the_tuple", "def getTLDPathsTuple(self, basepath):\n return (basepath, )", "def getPath(obj):", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def paths(self):\n return self._visit(self.start)", "def pathMap(self):\n pass", "def path(self):\n ...", "def paths(self) -> typing.Optional[typing.List[str]]:\n return self._values.get('paths')", "def warping_paths(self):\n return self.paths", "def class_path(model, variables):\n return None", "def path(self):\n\t\tif '/' in self.name:\n\t\t\treturn self.name.split(\"/\")\n\t\telse:\n\t\t\treturn self.name.split(\"\\\\\")", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def __init__(self, paths):\n self.paths = paths" ]
[ "0.7493626", "0.72214353", "0.71332705", "0.6974973", "0.67577744", "0.6682504", "0.66712487", "0.66641986", "0.66585755", "0.66585755", "0.66517574", "0.6619837", "0.6619723", "0.66154474", "0.65316343", "0.6466854", "0.64451087", "0.63843393", "0.6338825", "0.6330856", "0.6270424", "0.6206903", "0.61377454", "0.6111313", "0.6090677", "0.60639614", "0.60602623", "0.60529786", "0.6050811", "0.6049052" ]
0.7340977
1
Triggers a manual build of the project.
async def trigger_build(self, *, branch=None, message=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_build(self, postdata):\n pass", "def build(self):\n logging.info('Build %s of %s (%s)', self._build, self.name,\n self.working_dir)\n self._build += 1\n self._event = None\n status = self._builder.execute_script(self.working_dir, self.script)\n self._show_notification(status)", "def TriggerBuild(self):\n readthedocs_url = u'https://readthedocs.org/build/{0:s}'.format(\n self._project)\n\n try:\n self._url_lib_helper.Request(readthedocs_url, post_data=b'')\n\n except errors.ConnectionError as exception:\n logging.warning(u'{0!s}'.format(exception))\n return False\n\n return True", "def post_build_project(project_data):\n ListenerManager.call(_project_post_build_manager, project_data)", "def build(self):\n self.puts(colored.blue(\"Building project...\"))\n\n if os.path.exists(self.build_path):\n shutil.rmtree(self.build_path)\n os.makedirs(self.build_path)\n\n with indent(2):\n self._reset_build_sequence_id()\n self._build_pre_project_template()\n self._build_project_template()\n self._build_pre_resources_template()\n self._build_resources_template()\n self._build_post_resources_template()", "def pre_build_project(project_data):\n ListenerManager.call(_project_pre_build_manager, project_data)", "def monitor_project_build(self, project_name):\n pass", "def force(self, **kwargs):\n log.info(\"Forcing a build\")\n self._force = True", "def build_trigger(ctx, build_type_id, branch, comment, parameter, agent_id,\n open_build_log, wait_for_run):\n parameters = dict([p.split('=', 1) for p in parameter])\n data = ctx.obj.trigger_build(\n build_type_id=build_type_id,\n branch=branch,\n comment=comment,\n parameters=parameters,\n agent_id=agent_id)\n build_id = data['id']\n ctx.invoke(build_queue_show, args=[build_id])\n if open_build_log:\n url = data['webUrl'] + '&tab=buildLog'\n webbrowser.open(url)\n if not wait_for_run:\n return\n while data['state'] == 'queued':\n data = ctx.obj.get_queued_build_by_build_id(build_id)\n click.echo('state: %s' % data['state'])\n time.sleep(1)\n ctx.invoke(build_queue_show, args=[build_id])", "def force(self):\n print \"Forcing a build by touching files\"\n os.chdir(self.version.project.conf_dir(self.version.slug))\n os.system('touch * && touch */*')", "def autoBuild (self, event = None):\r\n if self.autobuildmenuitem.IsChecked():\r\n self.autobuildtimer.Start(5000)\r\n self.autoBuildStart();\r\n else:\r\n self.autobuildtimer.Stop()", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def run(self):\n self.scion_sh('run', 'nobuild')", "def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()", "def autoBuildTick (self, event = None):\r\n for pathname, oldmtime in self.autobuildfiles.iteritems():\r\n newmtime = os.stat(pathname).st_mtime\r\n if newmtime != oldmtime:\r\n #print \"Auto rebuild triggered by: \", pathname\r\n self.autobuildfiles[pathname] = newmtime\r\n self.rebuild()\r\n break", "def start_build(args):\n\n path = os.path.join(SCRATCH_DIR, args.project)\n \n # Set up virtual environment\n print(\"Setting up virtual python environment in %s\" % path)\n venv.create(path, clear=True, symlinks=True, with_pip=False)\n\n # Pull in repository data\n sourcepath = os.path.join(path, 'source')\n print(\"Cloning from git repository %s (branch: %s)\" % (args.source, args.sourcebranch))\n subprocess.run((GIT, 'clone', '--branch', args.sourcebranch, '--depth=1', '--no-single-branch', args.source, sourcepath),\n check=True)\n\n # Activate venv and install 
pips if needed. For dev/test, we will\n # assume that all requirements are available at the system level,\n # rather than needing to install them into the venv.\n ### note: this makes it difficult to test requirements.txt, but it\n ### will do for now. Debugging requirements.txt failures on the\n ### production buildbot is not difficult to correct.\n if IS_PRODUCTION and os.path.exists(os.path.join(sourcepath, 'requirements.txt')):\n print(\"Installing pips\")\n subprocess.run(('/bin/bash', '-c',\n 'source bin/activate; pip3 install -r source/requirements.txt'),\n cwd=path, check=True)\n else:\n print(\"On dev/test requirements.txt is not processed, skipping pip\")\n\n # Where are our tools?\n if IS_PRODUCTION:\n tool_dir = PELICANFILES\n else:\n tool_dir = THIS_DIR\n print(\"TOOLS:\", tool_dir)\n\n pelconf_yaml = os.path.join(sourcepath, AUTO_SETTINGS_YAML)\n if os.path.exists(pelconf_yaml):\n settings_path = os.path.join(path, AUTO_SETTINGS)\n if IS_PRODUCTION:\n builtin_plugins = PLUGINS\n else:\n builtin_plugins = os.path.join(tool_dir, os.pardir, 'plugins')\n generate_settings(pelconf_yaml, settings_path, [ builtin_plugins ], sourcepath)\n else:\n # The default name, but we'll pass it explicitly.\n settings_path = os.path.join(sourcepath, 'pelicanconf.py')\n\n # Set currently supported plugins\n ### this needs to be removed, as it is too indeterminate.\n with open(settings_path, 'a') as f:\n f.write(\"\"\"\ntry:\n PLUGINS += ['toc']\nexcept:\n PLUGINS = ['toc', 'gfm']\n\"\"\")\n\n # Call pelican\n buildpath = os.path.join(path, 'build/output')\n os.makedirs(buildpath, exist_ok = True)\n buildcmd = ('/bin/bash', '-c',\n 'source bin/activate; cd source && '\n ### note: adding --debug can be handy\n f'(pelican content --settings {settings_path} -o {buildpath})',\n )\n print(\"Building web site with:\", buildcmd)\n env = os.environ.copy()\n env['LIBCMARKDIR'] = LIBCMARKDIR\n subprocess.run(buildcmd, cwd=path, check=True, env=env)\n\n count = len(glob.glob(f'{buildpath}/**/*.html', recursive=True))\n print(f\"{count} html files.\")\n if args.count > 0 and args.count > count:\n print(\"Not enough html pages in the Web Site. 
Minimum %s > %s found in the Web Site.\" % (args.count, count))\n sys.exit(4)\n\n # Done for now\n print(\"Web site successfully generated!\")\n\n # It is much easier to do all the below, if we chdir()\n os.chdir(sourcepath)\n\n # Copy to result branch\n print(\"Copying web site to branch:\", args.outputbranch)\n\n try:\n subprocess.run((GIT, 'rev-parse', '--verify', \"origin/%s\" % args.outputbranch),\n check=True)\n print(\"- Doing fresh checkout of branch %s\" % args.outputbranch)\n subprocess.run((GIT, 'checkout', args.outputbranch, '-f'), check=True)\n subprocess.run((GIT, 'pull'), check=True)\n except:\n print(\"- Branch %s does not exist (yet), creating it...\" % args.outputbranch)\n # If .asf.yaml exists, which it should, make a copy of it in memory for later\n asfyml = os.path.join(sourcepath, '.asf.yaml')\n myyaml = None\n if os.path.exists(asfyml):\n myyaml = open(asfyml).read()\n subprocess.run((GIT, 'checkout', '--orphan', args.outputbranch), check=True)\n subprocess.run((GIT, 'rm', '-rf', '.'), check=True)\n # Add .asf.yaml back in if we found it.\n if myyaml:\n open(asfyml, \"w\").write(myyaml)\n subprocess.run((GIT, 'add', '.asf.yaml'), check=True)\n\n print(\"- Adding new content to branch\")\n # RM output dir if it already exists\n outputdir = os.path.join(sourcepath, 'output')\n if os.path.isdir(outputdir):\n print(\"Removing existing output dir %s\" % outputdir)\n shutil.rmtree(outputdir)\n shutil.move(buildpath, outputdir)\n subprocess.run((GIT, 'add', 'output/'), check=True)\n\n # Check if there are any changes.\n cp = subprocess.run((GIT, 'diff', '--cached', '--quiet'))\n if cp.returncode == 0:\n # There were no differences reported.\n print('Nothing new to commit. Ignoring this build.')\n else:\n print(\"- Committing to %s\" % args.source)\n subprocess.run((GIT, 'commit', '-m', 'Automatic Site Publish by Buildbot'), check=True)\n\n # If we're not in production, then avoid pushing changes.\n if IS_PRODUCTION:\n print('- Pushing changes, for publishing')\n subprocess.run((GIT, 'push', args.source, args.outputbranch), check=True)\n\n print('Success. 
Done.')\n # for dev/test provide viewing instructions\n if not IS_PRODUCTION:\n if args.listen:\n try:\n subprocess.run(('pelican','-l'), check=True)\n except KeyboardInterrupt:\n pass\n else:\n print(f'To test output:\\ncd {sourcepath}; pelican -l')", "def build(parameters):\n\n\n print(\"In Build module\")", "def start_build(self, build_id):\n pass", "def build(self, force: bool = False) -> BuildResult:\n raise NotImplementedError()", "def execute_build(\n self,\n tasks: List[ReleaseTask],\n bld_args: RepoBuildArgs,\n ) -> None:", "def actionBuild():\n\n #Init builder logger\n Builder.init()\n\n for target in Settings.targets:\n targetsToBuild, combineLibs, copyToOutput = Builder.getTargetGnPath(target)\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n if System.checkIfCPUIsSupportedForPlatform(cpu,platform):\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_PREPARE, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Build ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n result = Builder.run(target, targetsToBuild, platform, cpu, configuration, combineLibs, copyToOutput)\n Summary.addSummary(ACTION_BUILD, target, platform, cpu, configuration, result, Builder.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed building ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.RED)\n #Terminate script execution if stopExecutionOnError is set to True in userdef\n shouldEndOnError(result)\n else:\n Logger.printEndActionMessage('Build ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration)\n else:\n Logger.printColorMessage('Build cannot run because preparation has failed for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n Logger.printEndActionMessage('Build not run for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)", "def pre_configure_project(source_dir, build_dir):\n ListenerManager.call(_project_pre_configure_manager, source_dir, build_dir)", "def subscribeToSuccessfulBuilds(target):", "def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()", "def _doReleaseBuild(self, farbconfig):\n print \"Building all releases ...\"\n try:\n rbr = runner.ReleaseBuildRunner(farbconfig)\n rbr.run()\n print \"Release build completed.\"\n except runner.ReleaseBuildRunnerError, e:\n print >>sys.stderr, e\n sys.exit(1)", "def build():\n local('wintersmith build')", "def trigger(builder, revision, files=[], dry_run=False, extra_properties=None):\n repo_name = query_repo_name_from_buildername(builder)\n return buildapi.trigger_arbitrary_job(repo_name, builder, revision, files, dry_run,\n extra_properties)", "def grunt_build():\n local('cd {{ project_name }} && grunt build')", "def rebuild(options, project_directory=None):\n if options.help:\n print rebuild.__doc__\n sys.exit(1)\n\n if not project_directory:\n project_directory = os.getcwd()\n action_rebuild(project_directory)", "def test_build(self):\n self.app.build()" ]
[ "0.69045925", "0.6808615", "0.6769827", "0.6756172", "0.65721595", "0.6517098", "0.6494844", "0.6461456", "0.6375029", "0.6318517", "0.6312991", "0.6210087", "0.6203009", "0.6105097", "0.60498416", "0.59993887", "0.5988052", "0.5903205", "0.5868917", "0.5858315", "0.5857097", "0.5855001", "0.5835281", "0.5833631", "0.581226", "0.57830405", "0.57789946", "0.57670295", "0.57538867", "0.57512367" ]
0.690302
1
Gets a specific number of builds from the project.
async def get_builds(self, *, quantity=10):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBuild(number):", "def getBuild(number):", "def getBuilds():", "def get_first_n_built_chunk_ids(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT chunk_id FROM index_builder WHERE ib_task = 'built' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def Builds():\n return builds", "def getPendingBuilds():", "def getPendingBuilds():", "def getBuild():", "def num_projects(self):\n return self._num_projects", "def concurrent(self, project):\n limit_reached = False\n query = Q(\n project=project,\n )\n\n if project.main_language_project:\n # Project is a translation, counts all builds of all the translations\n query |= Q(project__main_language_project=project.main_language_project)\n query |= Q(project__slug=project.main_language_project.slug)\n\n elif project.translations.exists():\n # The project has translations, counts their builds as well\n query |= Q(project__in=project.translations.all())\n\n # If the project belongs to an organization, count all the projects\n # from this organization as well\n organization = project.organizations.first()\n if organization:\n query |= Q(project__in=organization.projects.all())\n\n # Limit builds to 5 hours ago to speed up the query\n query &= Q(date__gt=timezone.now() - datetime.timedelta(hours=5))\n\n concurrent = (\n (\n self.filter(query).exclude(\n state__in=[\n BUILD_STATE_TRIGGERED,\n BUILD_STATE_FINISHED,\n BUILD_STATE_CANCELLED,\n ]\n )\n )\n .distinct()\n .count()\n )\n\n max_concurrent = Project.objects.max_concurrent_builds(project)\n log.info(\n \"Concurrent builds.\",\n project_slug=project.slug,\n concurrent=concurrent,\n max_concurrent=max_concurrent,\n )\n if concurrent >= max_concurrent:\n limit_reached = True\n return (limit_reached, concurrent, max_concurrent)", "def builds(self):\n return self._builds", "async def builds(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"builds\"], *args, **kwargs)", "def test_get_build_number(self):\n pass", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. 
Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def get_builds(self, *, params: Optional[dict] = None) -> \"resource_types.Builds\":\n\n return communicator.Builds(self.__requester).fetch(parameters=params)", "def GetBuilds(date=0):\n\n # If date is set, get the build id from waterfall.\n builds = []\n\n if date:\n for builder in WATERFALL_BUILDERS + ROTATING_BUILDERS:\n build_ids = GetBuildID(builder, date)\n for build_id in build_ids:\n builds.append((builder, build_id))\n return builds\n\n # If date is not set, we try to get the most recent builds.\n # Read the values of the last builds used to generate a report, and\n # increment them appropriately, to get values for generating the\n # current report. (See comments in UpdateBuilds).\n with open(BUILD_DATA_FILE, 'r') as fp:\n lines = fp.readlines()\n\n for l in lines:\n l = l.rstrip()\n words = l.split(',')\n builder = words[0]\n build = int(words[1])\n builds.append((builder, build + 1))\n # NOTE: We are assuming here that there are always 2 daily builds in\n # each of the rotating builders. I am not convinced this is a valid\n # assumption.\n if builder in ROTATING_BUILDERS:\n builds.append((builder, build + 2))\n\n return builds", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')", "def generateFinishedBuilds(branches=[],\n num_builds=None,\n max_buildnum=None, finished_before=None,\n max_search=200,\n ):", "def get_pull_requests_count(self):\n repo_details = self.repo_url.strip().split('/')[-2:]\n pull_requests = 0\n i = 1\n while True:\n args = {'state': 'open', 'page': i, 'per_page': 100}\n api_url = \"https://api.github.com/repos/{}/{}/pulls?{}\".format(repo_details[0], repo_details[1],\n urllib.parse.urlencode(args))\n response = requests.request(\"GET\", api_url)\n response = json.loads(response.content)\n if not response:\n return pull_requests\n else:\n pull_requests += len(response)\n i += 1", "def getPendingBuildTimes():\n # TODO: it might be nice to make this into getPendingBuildSets, which\n # would let someone subscribe to the buildset being finished.\n # However, the Scheduler doesn't actually create the buildset until\n # it gets submitted, so doing this would require some major rework.", "def getBuildRequests():", "def get_builder_stats(builder: str, time_window: datetime.datetime) -> BuildStats:\n print('Gettings builds for {}...'.format(builder))\n # TODO: can we limit the data we're requesting?\n url = '{}/{}/builds/_all'.format(BASE_URL, builder)\n stats = BuildStats()\n for build, results in requests.get(url).json().items(): \n start_time = 
datetime.datetime.fromtimestamp(float(results['times'][0]))\n if start_time < time_window:\n continue\n successful = results['text'] == ['build', 'successful']\n stats.add(successful)\n return stats", "def generateFinishedBuilds(builders=[], branches=[],\n num_builds=None, finished_before=None,\n max_search=200):", "def num_projects(self, num_projects):\n\n self._num_projects = num_projects", "def collection_get(self):\n\n return {\n 'builds': self.build_info.get_builds(\n self.request.matchdict['product_name'],\n version=self.request.matchdict['product_version']\n )\n }", "def build_number(self):\n return self.get_data(\"build_number\")", "def build():\n return get_cached(\"build.json\", False).get(\"build_id\")", "def bamboo_builds(ctx, from_date, to_date, use_cache):\r\n\r\n if from_date is None:\r\n from_date, to_date = previous_month_range()\r\n\r\n log.info('Getting Bamboo builds between {} and {}'.format(from_date, to_date))\r\n report = BambooBuildsReport(\r\n ctx.obj,\r\n from_date=from_date,\r\n to_date=to_date\r\n )\r\n report.run_report(use_cache=use_cache)", "def get_build(self, build_id):\n pass" ]
[ "0.728129", "0.728129", "0.6382856", "0.62236434", "0.6215881", "0.6080445", "0.6080445", "0.59587866", "0.59204555", "0.57647854", "0.5759255", "0.5724999", "0.5719008", "0.56793606", "0.56029767", "0.5565936", "0.55610716", "0.5540158", "0.54715705", "0.54323786", "0.5412853", "0.5399856", "0.53983504", "0.53974426", "0.5380614", "0.53742117", "0.53711414", "0.5364596", "0.5361517", "0.53351253" ]
0.73818636
0
Return `ret_value` `times` times. If generator will receive some value from outside, update `ret_value`
def exercise_gen(ret_val, times):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def counter_wrapper(generator):\n for value in generator:\n yield value", "def counter_wrapper_2(generator):\n yield from generator", "def _mc_gen():\r\n n = 1\r\n while True:\r\n yield n\r\n n += 1", "def random_values():\n while True:\n yield random()", "def Count():\n return CheckForError(lib.Generators_Get_Count())", "def repeat(value: T, times: int) -> List[T]:\n return [value] * times", "def task5(count):\n number_1, number_2 = 1, 1\n for _ in range(count):\n yield number_1\n number_1, number_2 = number_2, number_1 + number_2", "def next(self):\n self.lock.acquire()\n self.count += self.step;\n result = self.count\n self.lock.release()\n return result", "async def async_generator() -> Generator[float, None, None]:\n for _ in range(10):\n await asyncio.sleep(1)\n yield random.random() * 10", "def generator(factor: int, test: typing.Callable[[int], bool],\n start: int) -> typing.Iterator[int]:\n value = start\n while True:\n value = (value * factor) % 2147483647\n if test(value):\n yield value", "def iter_latest_asynchonously(gen_func, timeout = None, empty_value = None, use_forkserver = False, uninitialized_wait = None):\n if use_forkserver:\n from multiprocessing import set_start_method # Only Python 3.X\n set_start_method('forkserver') # On macos this is necessary to start camera in separate thread\n\n m = Manager()\n namespace = m.Namespace()\n\n lock = Lock()\n\n with lock:\n namespace.time_and_data = (-float('inf'), Uninitialized)\n\n p = Process(target=_async_value_setter, args=(gen_func, namespace, lock))\n p.start()\n while True:\n with lock:\n lasttime, item = namespace.time_and_data\n if item is PoisonPill: # The generator has terminated\n break\n elif item is Uninitialized:\n if uninitialized_wait is not None:\n time.sleep(uninitialized_wait)\n continue\n else:\n yield empty_value\n elif timeout is not None and (time.time() - lasttime) > timeout: # Nothing written or nothing recent enough\n yield empty_value\n else:\n yield item", "def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element", "def random_number_generator(arg1, arg2):\n return 42", "def multiple_gen(modulus):\n count = 1\n while True:\n yield modulus * count\n count += 1", "async def async_generator() -> Generator[float, None, None]:\n\n for i in range(10):\n yield random.random()\n await asyncio.sleep(1)", "def next(self):\n self.attempt += 1\n if self.attempt > self.max_retries:\n raise StopIteration\n return self.slot_duration * random.randint(0, 2 ** self.attempt - 1)", "async def async_generator() -> Generator[float, None, None]:\n for i in range(10):\n yield (random.uniform(0, 10))\n await asyncio.sleep(1)", "def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value", "def repeat_count(instance, 
args):\r\n count = instance.repeat_count(args)\r\n return count", "def counter():\n for value in range(5):\n yield \"<{}>\".format(value)", "def next ( num = 1 ) :\n return run ( num )", "def counter(self, value: int, /) -> None:", "def generator(self):\n return [None, 1]", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def timeit_context() -> Generator:\n result = TimeItResult()\n started_time = time.time()\n try:\n yield result\n finally:\n result.time_passed = time.time() - started_time", "def data_repeated(data):\n\n def gen(count):\n for _ in range(count):\n yield data\n\n yield gen", "def __next__(self):\n if self.returned >= len(self):\n raise StopIteration\n else:\n val = self.buffer[self.current]\n self.current = (self.current + 1) % len(self.buffer)\n self.returned += 1\n return val", "def iterate(func, x):\n while True:\n x = func(x)\n yield x" ]
[ "0.6483438", "0.6218066", "0.6070627", "0.60574543", "0.5988066", "0.58443666", "0.5823438", "0.57975304", "0.5627555", "0.5625004", "0.5599266", "0.5565379", "0.55576694", "0.5519748", "0.5512081", "0.5501229", "0.54756796", "0.5460212", "0.5435833", "0.54218477", "0.54203224", "0.5413908", "0.5413298", "0.53569335", "0.53565973", "0.5334986", "0.533437", "0.5331532", "0.53177315", "0.5306439" ]
0.7192209
0
Update `exercise_gen`, so it will ignore all exceptions
def exercise2(): g1 = exercise_gen("I'll ignore errors", 300) assert next(g1) == "I'll ignore errors" assert g1.send('new val') == 'new val' assert g1.throw(Exception) == 'new val' assert next(g1) == 'new val'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_gen(ret_val, times):", "def experiment3():\n raise FAKE_ERROR", "def test_post_codegen_error_query(self):\n with tempfile.TemporaryDirectory() as tmpdirname:\n translator = AstUprootTranslator()\n with pytest.raises(GenerateCodeException):\n translator.generate_code(\"\", cache_path=tmpdirname)", "def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = self.cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)", "def testgen(self):\n self.parse()\n self.generate()", "def test_generate_all_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \"tests/system-testing/inputs/generate-all/\"\n input_filenames = [\n \"simplest.chatette\", \"only-words.chatette\",\n \"words-and-groups.chatette\", \"alias.chatette\", \"include.chatette\",\n \"slot.chatette\", \"slotrolegroup.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n if not TestSystem.check_no_duplicates(facade.train_examples):\n pytest.fail(\n \"Some examples were generated several times \" +\n \"when dealing with file '\" + filename + \"'.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n if len(legal_examples) != len(facade.train_examples):\n training_texts = [ex.text for ex in facade.train_examples]\n for legal_ex in legal_examples:\n if legal_ex[\"text\"] not in training_texts:\n pytest.fail(\n \"Example '\" + legal_ex[\"text\"] + \\\n \"' was not generated.\"\n )\n pytest.fail(\n \"An unknown example was not generated (\" + \\\n str(len(facade.train_examples)) + \\\n \" generated instead of \" + str(len(legal_examples)) + \\\n \").\\nGenerated: \" + str(facade.train_examples)\n )\n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )", "def testExplicitGeneratorConvenienceFunctionExceptionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tc.generator(x.g(8, 9), [10], Exception(\"bogus\"))\n\t\tc.replay()\n\t\tg = x.g(8, 9)\n\t\tself.failUnless(g.next() == 10)\n\t\tself.failUnlessRaises(Exception, g.next)", "def test_incorrect_prediction_key(self):\n self._config['Prediction key'] = 'wrong_key'\n with self.assertRaisesRegex(ValueError, 'Invalid prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def testExplicitGeneratorExecptionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.g(8, 9)\n\t\tc.generator()\n\t\tc.setReturn(10)\n\t\tc.setException(Exception(\"bogus\"))\n\t\tc.replay()\n\t\tg = x.g(8, 9)\n\t\tself.failUnless(g.next() == 
10)\n\t\tself.failUnlessRaises(Exception, g.next)", "def test_create_unexpected_problem(self):\n pass", "def test_no_model(self):\n\n with self.assertRaisesRegex(ValueError,\n 'Please provide a model for this generator'):\n self._gen.generate(\n example=self._example,\n model=None,\n dataset=self._dataset,\n config=self._config)", "def generate(self, **kwargs):\n yield NotImplementedError", "def test_invalidate_error():\n \n test_object = fa.read_in_envision(data_csv=plate_2_repeat, platemap_csv=plate_map_file, data_type='plate', size=384)\n test_object.invalidate() # execute the invalidate function without specifying well ids, rows or columns to be invalidated", "def add_exercise( self, exercise ):\n self.exercises.append( exercise )", "def test_generate_missing(pytester):\n pytester.makefile(\n \".feature\",\n generation=textwrap.dedent(\n \"\"\"\\\n Feature: Missing code generation\n\n Background:\n Given I have a foobar\n\n Scenario: Scenario tests which are already bound to the tests stay as is\n Given I have a bar\n\n\n Scenario: Code is generated for scenarios which are not bound to any tests\n Given I have a bar\n\n\n Scenario: Code is generated for scenario steps which are not yet defined(implemented)\n Given I have a custom bar\n \"\"\"\n ),\n )\n\n pytester.makepyfile(\n textwrap.dedent(\n \"\"\"\\\n import functools\n\n from pytest_bdd import scenario, given\n\n scenario = functools.partial(scenario, \"generation.feature\")\n\n @given(\"I have a bar\")\n def _():\n return \"bar\"\n\n @scenario(\"Scenario tests which are already bound to the tests stay as is\")\n def test_foo():\n pass\n\n @scenario(\"Code is generated for scenario steps which are not yet defined(implemented)\")\n def test_missing_steps():\n pass\n \"\"\"\n )\n )\n\n result = pytester.runpytest(\"--generate-missing\", \"--feature\", \"generation.feature\")\n result.assert_outcomes(passed=0, failed=0, errors=0)\n assert not result.stderr.str()\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n ['Scenario \"Code is generated for scenarios which are not bound to any tests\" is not bound to any test *']\n )\n\n result.stdout.fnmatch_lines(\n [\n 'Step Given \"I have a custom bar\" is not defined in the scenario '\n '\"Code is generated for scenario steps which are not yet defined(implemented)\" *'\n ]\n )\n\n result.stdout.fnmatch_lines(\n ['Step Given \"I have a foobar\" is not defined in the background of the feature \"Missing code generation\" *']\n )\n\n result.stdout.fnmatch_lines([\"Please place the code above to the test file(s):\"])", "def source_exercise_target(self, node):\n std_domain = self.builder.env.domains['std']\n figtype = std_domain.get_enumerable_node_type(node.parent)\n assert figtype == 'solution'\n\n fig_id = node.parent['ids'][0]\n\n # sort out the label\n exercise_label = node.parent.attributes['exercise']\n\n names = node.parent['names']\n assert len(names) == 1\n assert names[0].startswith('sol:')\n\n # get exercise id\n assert fig_id.startswith('sol-')\n exercise_id = 'ex-{}'.format(fig_id[4:])\n assert exercise_id == nodes.make_id(exercise_label)\n\n # because the exercise may be in a different document, we go global\n all_labels = std_domain.data['labels']\n assert exercise_label in all_labels\n\n # track down the document and identifier\n exercise_source_docname = all_labels[exercise_label][0]\n fig_identifiers = self.builder.env.toc_fignumbers\n assert exercise_source_docname in fig_identifiers\n assert 'exercise' in fig_identifiers[exercise_source_docname]\n ex_docname_map = 
fig_identifiers[exercise_source_docname]['exercise']\n assert exercise_id in ex_docname_map\n\n fignumber = ex_docname_map[exercise_id]\n\n return exercise_source_docname, exercise_id, fignumber", "def rollback(self):\n\t\traise GeneratorException(\"Not implemented\")", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def new_exercise():\n db = get_db()\n users = db.users\n exercises = db.exercises\n data = request.json\n \n expected_fields = ['name', 'pic_urls', 'instructions', 'created_by']\n # If the feilds in data don't match the expected fields\n if not set(expected_fields) == set(data):\n raise APIException(status_code=400, message='data does not match the expected fields')\n if not ( isinstance(data['name'], str) and isinstance(data['instructions'], str)\n and isinstance(data['created_by'], str) and isinstance(data['pic_urls'], list) ):\n raise APIException(status_code=400, message='name, created_by, and instructions must be strings')\n\n for pic in data['pic_urls']:\n if not isinstance(pic, str):\n raise APIException(status_code=400, message='each pic_url must be a string')\n\n # Check if created_by is an existing user\n cursor = users.find({\"user_id\": data['created_by']})\n if cursor.count() is 0:\n raise APIException(status_code=404, message='user_id represented by created_by does not exist')\n elif cursor.count() > 1:\n raise APIException(status_code=500, message='Error, multiple users with same user_id (created_by) exist, which is not allowed')\n \n data['workouts_used_in'] = 0\n\n # Create n grams for exercise to be used in search\n data['ngrams'] = ' '.join(make_ngrams(str(data['name']).lower()))\n\n # Insert the new exercise and return its newly created key\n postid = exercises.insert_one(data)\n\n # Index the exercises in the database to be able to be searched\n exercises.search.create_index(\n [\n ('ngrams', 'text'),\n ],\n name='search_exercises',\n weights={\n 'ngrams': 100\n }\n )\n\n return_data = {\"exercise_id\": str(postid.inserted_id)}\n return flask.jsonify(**return_data), 200", "def test_generate_nb_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \\\n \"tests/system-testing/inputs/generate-nb/training-only/\"\n input_filenames = [\n \"only-words.chatette\", \"words-and-groups.chatette\",\n \"alias.chatette\", \"include.chatette\", \"slot.chatette\",\n \"bugfixes/bug-22-slot-position.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n # if not TestSystem.check_no_duplicates(facade.train_examples): # TODO: make sure there are no duplicates in this case\n # pytest.fail(\"Some examples were generated several times \"+\n # \"when dealing with file '\"+filename+\"'.\\n\"+\n # \"Generated: \"+str(facade.train_examples))\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n \n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )\n\n 
filename_zero = \"zero-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_zero)\n facade.run(file_path)\n if len(facade.train_examples) != 0:\n pytest.fail(\n \"When dealing with file 'zero-ex.chatette', no examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n\n filename_one = \"one-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_one)\n facade.run(file_path)\n print(\"TRAIN EX: \" + str(facade.train_examples))\n if len(facade.train_examples) != 1:\n pytest.fail(\n \"When dealing with file 'one-ex.chatette', one examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )", "def test_strain_not_in(generate_no_strain_one_file):\n fname = generate_no_strain_one_file\n with pytest.raises(Exception) as f:\n process_files([fname])", "def add_all_exercises(exam_date, path_all, path_collection):\n type_list = [x for x in os.listdir(path_collection) if '.DS_Store' not in x]\n print(type_list)\n for i in range(len(type_list)):\n print('Type: ' + type_list[i])\n os.mkdir(path_all + '/' + type_list[i])\n path_type = path_collection + '/' + type_list[i]\n nb_ex_type = len(os.listdir(path_type)) # indexing file da 0\n for j in range(nb_ex_type):\n chosen_type_yaml = path_type + '/' + type_list[i] + str(j) + '.yaml'\n if j+1>=9:\n path_ex = path_all + '/' + type_list[i] + '/istanza_' + str(j+1)\n else:\n path_ex = path_all + '/' + type_list[i] + '/istanza_0' + str(j+1)\n print(path_ex)\n os.mkdir(path_ex)\n mode1.create_exercise(exam_date, str(j+1), path_ex, chosen_type_yaml)\n #mode2.create_exercise(str(i+1), path_ex, chosen_type_yaml)\n #mode3.create_exercise(str(i+1), path_ex, chosen_type_yaml)\n print('Exercise ' + str(j+1) + ' added')\n return", "def generate_first_problem():\n click.echo(\"No Project Euler files found in the current directory.\")\n generate(1)\n sys.exit()", "def _test_generator(notebook):\n \n def test(self):\n nb, errors = run_notebook(notebook, kernel_name=self.kernel_name)\n \n message = ''\n if len(errors) > 0:\n for error in errors:\n message += '%s: %s\\n' % (error['ename'], error['evalue'])\n for line in error['traceback']:\n message += ' %s\\n' % line\n self.assertEqual(errors, [], message)\n \n return test", "def run(self):\n # NOTE: since this directive has a complementary `solution` directive\n # it may be better to put the two in a separate `exercise` domain\n env = self.state.document.settings.env\n\n # get the user-provided label of the exercise\n label = self.arguments[0]\n assert label.startswith('ex:'), (\n 'The exercise label ({}) must start with the \"ex:\" prefix.'.format(\n label))\n\n if self.content:\n content_string = '\\n'.join(self.content)\n content_list = self.content\n content_offset = self.content_offset\n else:\n content_string = read_exercise(env, label)\n content_list = content_string.split('\\n')\n content_offset = 0\n\n # we do not assign an id to this node (despite it being a prerequisite\n # for assigning it a fignum) as this will happen automatically when\n # a name is assigned to this node\n exercise_content_node = exercise(content_string)\n\n # since the label of the node was not given in the standard docutil\n # manner (via the optional `name` parameter), it needs to be manually\n # assigned to this instance of the exercise directive and processed,\n # i.e., it registers the label with the domain (standard `std` domain\n # in this case); it also checks whether the labels is not duplicated\n self.options['name'] = label\n 
self.add_name(exercise_content_node)\n # these steps ensure that the node created by this directive can be\n # referenced with `ref` and `numref`\n\n # build an empty exercise title, the fignum is injected when building\n # its HTML representation\n exercise_title_node = exercise_title()\n\n # add title to the exercise and process the content\n exercise_content_node += exercise_title_node\n self.state.nested_parse(\n content_list, content_offset, exercise_content_node)\n\n return [exercise_content_node]", "def test_acc(self):\n raise Exception(\" not implemented in base model\")", "def create_exercise(exam_date, num, path_ex_folder, path_yaml):\n global images_to_add\n global REL_PATH_IMAGES\n REL_PATH_IMAGES = 'img_' + exam_date\n images_to_add = []\n path_mode_free = path_ex_folder + '/modo_libero/' # new folder for the considered submission mode\n os.mkdir(path_mode_free)\n exer = read_exercise_yaml(path_yaml) # reading the given yaml\n notebook = nb.v4.new_notebook() # creating the new notebook\n #print(exer['name'])\n if exer['name'] in ('graphs_flow','graphs_trees', 'graphs_planarity','graphs_paths'):\n insert_graph_import(notebook) #required graph import\n insert_no_scroll(notebook) #no scroll of output div\n else:\t\n insert_import_mode_free(notebook) # required import\n insert_start_button(notebook) # start button to run cells with tag 'run_start'\n insert_hide_code(notebook) # hide all code cells\n insert_user_bar_lib(notebook,path_ex_folder) # insert user_bar.py in a code cell\n insert_heading(notebook, exer['title']) # heading with title\n insert_description1(notebook, exer['description1'], exam_date, path_ex_folder) # description 1\n if 'description2' in exer:\n insert_description2(notebook, exer['description2']) # description 2\n insert_tasks(notebook, exer['tasks']) # inserting the several tasks\n if exer['name'] in ('lp_duality', 'lp_interactive', 'lp_modelling', 'lp_two_phases'): # other libraries needed for some types of exercises\n insert_needed_import(notebook, exer['name'])\n if int(num) >= 10: # writing the notebook and saving it in the correct folder\n note_name = 'Esercizio_' + num + '.ipynb'\n prev_folder = 'esercizio_' + num\n else:\n note_name = 'Esercizio_0' + num + '.ipynb'\n prev_folder = 'esercizio_0' + num\n insert_rendition(notebook, note_name)\n nb.write(notebook, note_name)\n os.rename(os.getcwd()+ '/' + note_name, path_mode_free + '/' + note_name)\n os.system(\"jupyter trust \" + path_mode_free + note_name) # signing the notebook in order to make it trusted\n insert_suppl_folders(path_mode_free) # inserting the supplementary folders (i.e., 'allegati', 'img')\n if exer['name'] in ('graphs_flow','graphs_trees', 'graphs_planarity','graphs_paths'):\n insert_graph_folder(path_mode_free)\n if 'tags' in exer:\n e_dict = {'title':exer['title'],'tags':exer['tags'],'tot_points':0,'link':'http://127.0.0.1:8888/notebooks/'+prev_folder+'/modo_libero/' + note_name, 'tasks':exer['tasks']}\n else:\n\t e_dict = {'title':exer['title'],'tags':[],'tot_points':0,'link':'http://127.0.0.1:8888/notebooks/'+prev_folder+'/modo_libero/' + note_name, 'tasks':exer['tasks']}\n return e_dict", "def test_sudoku_solver_handles_garbage_input():\n from sudoku_solver_hard_unique import setup\n with pytest.raises(Exception) as e_info:\n candidates, dicts, square_coords = setup(invalid)\n assert str(e_info.value) == \"Garbage input: 'a' at coord (0, 8), not a valid Sudoku\"", "def unexpectedException(self):", "def test_generate_03_raise_exception(self):\n move = self.get_new_move(3)\n 
form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n default_next_serial_number='code-xxx',\n ))\n wiz = form_wizard.save()\n with self.assertRaises(UserError):\n wiz.generate_serial_numbers()\n\n form_wizard.next_serial_count = 0\n # Must raise an exception because `next_serial_count` must be greater than 0.\n with self.assertRaises(ValidationError):\n form_wizard.save()" ]
[ "0.5758369", "0.56753606", "0.5549753", "0.53802556", "0.53689146", "0.53568685", "0.5292572", "0.528666", "0.5275125", "0.5251637", "0.5170305", "0.51676023", "0.51614165", "0.51531315", "0.5127131", "0.5115587", "0.51139444", "0.5084856", "0.50776774", "0.50622576", "0.5061116", "0.50604576", "0.5056269", "0.50464183", "0.50455076", "0.5027918", "0.5026984", "0.5012438", "0.5011268", "0.5010567" ]
0.6592326
0
Create the reference file of a test using the response received. The file will be created in the git references folder provided in the settings file
def create_reference( self, response_checker=default_checker.default_journey_checker ): # Check that the file doesn't already exist filename = self.get_file_name() filepath = os.path.join(config["REFERENCE_FILE_PATH"], filename) if os.path.isfile(filepath): logger.warning( "NO REF FILE CREATED - {} is already present".format(filepath) ) else: # Concatenate reference file info reference_text = OrderedDict() reference_text["query"] = self.query.replace( config["URL_JORMUN"][7:], "localhost" ) logger.warning("Query: {}".format(self.query)) reference_text["response"] = response_checker.filter( json.loads(self.full_resp) ) reference_text["full_response"] = json.loads( self.full_resp.replace(config["URL_JORMUN"][7:], "localhost") ) # Write reference file directly in the references folder with open(filepath, "w") as ref: ref.write(json.dumps(reference_text, indent=4)) logger.info("Created reference file : {}".format(filepath))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ref(request):\n r = referencepytest.ref(request)\n this_dir = os.path.abspath(os.path.dirname(__file__))\n r.set_data_location(os.path.join(this_dir, '..', 'reference'))\n return r", "def test_with_new_file(self):\n repository = self.create_repository(tool_name='Test')\n review_request = self.create_review_request(\n repository=repository,\n submitter=self.user,\n publish=True)\n diffset = self.create_diffset(review_request)\n filediff = self.create_filediff(diffset,\n source_revision=PRE_CREATION)\n\n rsp = self.api_get(\n get_original_file_url(review_request, diffset, filediff),\n expected_status=404)\n self.assertEqual(rsp['stat'], 'fail')\n self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)", "def ref_resp2files(output_file, output_json):\n with open(output_file, \"w\") as reference_text:\n reference_text.write(output_json)", "def create_ref_file(self):\n id = self.task_record.create_published_output_name()\n ctx = self.block_store.make_local_output(id)\n self.open_ref_contexts[ctx.get_filename()] = ctx\n return ctx.get_filename()", "def compare_with_ref(\n self, response, response_checker=default_checker.default_journey_checker\n ):\n\n def ref_resp2files(output_file, output_json):\n \"\"\"\n Create a file for the filtered response and for the filtered reference\n \"\"\"\n with open(output_file, \"w\") as reference_text:\n reference_text.write(output_json)\n\n def print_diff(ref_file, resp_file):\n \"\"\"\n Print differences between reference and response in console\n \"\"\"\n # open reference\n with open(ref_file) as reference_text:\n reference = reference_text.readlines()\n # open response\n with open(resp_file) as response_text:\n response = response_text.readlines()\n\n # Print failed test name\n print_color(\"\\n\\n\" + str(file_name) + \" failed :\" + \"\\n\\n\", Colors.PINK)\n\n symbol2color = {\"+\": Colors.GREEN, \"-\": Colors.RED}\n for line in difflib.unified_diff(reference, response):\n print_color(line, symbol2color.get(line[0], Colors.DEFAULT))\n\n # Filtering the answer. 
(We compare to a reference also filtered with the same filter)\n filtered_response = response_checker.filter(response)\n\n # Get the reference\n\n # Create the file name\n filename = self.get_file_name()\n filepath = os.path.join(config[\"REFERENCE_FILE_PATH\"], filename)\n\n assert os.path.isfile(filepath), \"{} is not a file\".format(filepath)\n\n with open(filepath, \"r\") as f:\n raw_reference = f.read()\n\n # Transform the string into a dictionary\n dict_ref = json.loads(raw_reference)\n\n # Get only the full_response part from the ref\n ref_full_response = dict_ref[\"full_response\"]\n\n # Filtering the reference\n filtered_reference = response_checker.filter(ref_full_response)\n\n # Compare response and reference\n try:\n response_checker.compare(filtered_response, filtered_reference)\n except AssertionError as e:\n # print the assertion error message\n logging.error(\"Assertion Error: %s\" % str(e))\n # find name of test\n file_name = filename.split(\"/\")[-1]\n file_name = file_name[:-5]\n\n # create a folder\n dir_path = config[\"RESPONSE_FILE_PATH\"]\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # create path to ref and resp\n full_file_name_ref = dir_path + \"/reference_\" + file_name + \".txt\"\n full_file_name_resp = dir_path + \"/response_\" + file_name + \".txt\"\n\n json_filtered_reference = json.dumps(filtered_reference, indent=4)\n json_filtered_response = json.dumps(filtered_response, indent=4)\n\n # Save resp and ref as txt files in folder named outputs\n ref_resp2files(full_file_name_ref, json_filtered_reference)\n ref_resp2files(full_file_name_resp, json_filtered_response)\n\n # Print difference in console\n print_diff(full_file_name_ref, full_file_name_resp)\n\n raise", "def test_output_source_file(self):\n response = self.client.open(\n '/v1/control/file/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()", "def testExampleFileGeneration(ref):\n outdir = ref.tmp_dir\n outpath = os.path.join(outdir, 'file_result.html')\n generate_file(outpath)\n ref.assertTextFileCorrect(outpath, 'file_result.html',\n ignore_substrings=['Copyright', 'Version'])", "def test_download_file(token):\n\n # github => repo => release => asset_list => asset => url => download\n\n g_h = github.Github(token, per_page=100)\n repo = g_h.get_repo(TEST_SLUG, lazy=False)\n release = repo.get_release(TEST_TAG)\n asset_list = release.get_assets()\n sha_filename = Template(Arguments.HASH_FILE).safe_substitute({\n 'platform': platform.system().lower()\n })\n\n assets_calculated_sha = 'notasha'\n sha_dict = {}\n\n for check_asset in asset_list:\n # look through list of assets for uploaded file and sha file\n\n if check_asset.name == os.path.basename(TEST_FILENAME):\n\n # the uploaded asset\n request = requests.get(check_asset.browser_download_url)\n open(TEST_DOWNLOAD, 'wb').write(request.content)\n\n # recalc hash of downloaded file\n assets_calculated_sha = Arguments.get_hash(TEST_DOWNLOAD)\n\n elif check_asset.name == sha_filename:\n\n # the sha hash file\n request = requests.get(check_asset.browser_download_url)\n sha_dict = request.json()\n\n assert assets_calculated_sha == sha_dict[os.path.basename(TEST_FILENAME)]", "def 
test_use_generated_reference_tests_directory(\n self,\n tmp_path_factory,\n platform_url,\n setup_student_repos,\n workdir,\n rtd_path,\n ):\n # arrange\n run_generate_rtd(base_url=platform_url, rtd=rtd_path, workdir=workdir)\n clone_dir = workdir / \"clone_dir\"\n clone_dir.mkdir()\n\n # act\n results = repobee_testhelpers.funcs.run_repobee(\n f\"repos clone -a {ASSIGNMENTS_ARG} \"\n f\"--base-url {platform_url} \"\n f\"--junit4-reference-tests-dir {rtd_path} \"\n f\"--junit4-hamcrest-path {HAMCREST_PATH} \"\n f\"--junit4-junit-path {JUNIT_PATH} \",\n plugins=[junit4],\n workdir=clone_dir,\n )\n\n # assert\n iterations = 0\n for repo_name in plug.generate_repo_names(\n repobee_testhelpers.const.STUDENT_TEAMS, ASSIGNMENT_NAMES\n ):\n iterations += 1\n first_result, *rest = results[repo_name]\n assert not rest, \"there should only be one result\"\n assert first_result.name == SECTION\n assert first_result.status != plug.Status.ERROR\n\n assert iterations > 0, \"the assertion loop did not execute\"", "def create_test_file(test_path, robot_test_name, entry_url, full_path):\n new_test_file = test_path + '\\\\' + robot_test_name + '.tstest'\n shutil.copyfile(template_test_file, new_test_file) #note shutil.copyfile() overwrites target file if it exists\n r = requests.get(entry_url)\n # print r.content\n # fill in TestPrototypeParameter interface XML element and replace hard coded Param1 by variable name\n # fill in SingleVariable interface XML element and replace hard coded default_val by default value\n robot_arguments = ''\n replacements = dict()\n if VAR:\n interface_section = ''\n variable_section = ''\n report_expression_section = ''\n\n # by default, no need to rename robot variable in test unless there is space in the name\n variable_renames = dict()\n for variable in retrieve_variables(r.content):\n variable_name = variable[0]\n variable_renames[variable_name] = variable_name\n # print variable_name\n\n # if variable name has single spaces in it, e.g. 'Example Input 1', replace by '_', e.g. 
'Example_Input_1'\n # however if there is also robot variable 'Example_Input_1', then keep appending '_' for the corresponding\n # TestShell test variable until it is unique\n for variable_name, rename in variable_renames.iteritems():\n if ' ' in variable_name:\n # rename = variable_name.replace(' ', '_') #replace space in the name by underscore\n rename = re.sub('[^0-9a-zA-Z_]', '_', variable_name) # replace each unsupported char by underscore\n while rename in variable_renames:\n rename += '_'\n variable_renames[variable_name] = rename\n\n for variable in retrieve_variables(r.content):\n variable_name = variable[0]\n default_value = variable[1]\n replacements[variable_name_in_template] = variable_renames[variable_name]\n replacements[variable_original_name_in_template] = variable_name\n replacements[variable_default_value_in_template] = default_value\n interface_section += fill_template(test_interface_template, replacements)\n variable_section += fill_template(test_variable_template, replacements)\n report_expression_section += fill_template(report_expression_template, replacements)\n robot_arguments += \" --variable \\'\" + variable_name + \"\\':\\'{\" + variable_renames[variable_name] + \"}\\'\"\n\n replacements = {\"test1.robot\": robot_arguments + \" \\'\" + full_path + \"\\'\"} # reset dictionary\n if VAR:\n replacements[test_interface_template_fill_tag] = interface_section\n replacements[test_variable_template_fill_tag] = variable_section\n replacements[report_expression_template_fill_tag] = report_expression_section\n # the following initial values of required variables are hard coded in test template\n replacements['CLOUDSHELL_SERVER_ADDRESS_VALUE'] = cloudshell_server_address\n replacements['CLOUDSHELL_SERVER_PORT_VALUE'] = cloudshell_server_port\n replacements['CLOUDSHELL_USERNAME_VALUE'] = cloudshell_server_username\n replacements['CLOUDSHELL_PASSWORD_VALUE'] = cloudshell_server_password\n replacements['CLOUDSHELL_DOMAIN_VALUE'] = cloudshell_server_domain\n replacements['EXEC_SERVER_ADDRESS_VALUE'] = exec_server_address\n replacements['EXEC_USERNAME_VALUE'] = exec_server_username\n replacements['EXEC_PASSWORD_VALUE'] = exec_server_password\n replacements['BITBUCKET_REPOSITORY_URL'] = bitbucket_repository_url\n replacements['EXEC_SERVER_WORKING_DIR'] = exec_server_working_directory\n replacements['ROBOT_TESTS_DIR'] = robot_tests_directory\n replacements['ARCHIVE_OUTPUT_DIR'] = archive_output_directory\n replacements['LOCAL_WORKING_DIR'] = local_working_directory\n # print replacements\n substitute_string_in_tstest_file(new_test_file, replacements)\n new_test_file_ascii_name = new_test_file.encode('ascii', 'ignore') # otherwise UnicodeDecodeError\n return new_test_file_ascii_name", "def test_generate_diff_download(self, mock_response, mock_request, mock_test_result_file):\n from mod_test.controllers import generate_diff\n\n mock_request.accept_mimetypes.best = 'application/json'\n\n response = generate_diff(1, 1, 1, to_view=0)\n\n self.assertTrue(response, mock_response())", "def test_set_api_url(self):\n UI_path = './resources/'\n test_js_filename = 'test_main.js'\n new_js_filename = UI_path + 'main_blabla.js'\n reference_js = UI_path + 'test_main_reference.js'\n\n os.system('cp {} {}'.format(\n UI_path + test_js_filename,\n new_js_filename))\n\n api_url = 'https://app.etabot.ai:8000/api/'\n set_api_url.set_api_url(\n UI_path, api_url, api_url_var_name='apiUrl')\n\n ute.assertFileEqual(new_js_filename, reference_js, self)\n os.remove(new_js_filename)", "def 
test_create_symlink_file(self):\n pass", "def test_make_file():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.make_job_file(STATUS_DIR, 'generation', 'test1', TEST_1_ATTRS_1)\n status = Status.retrieve_job_status(STATUS_DIR, 'generation', 'test1')\n msg = 'Failed, status is \"{}\"'.format(status)\n assert status == 'R', msg", "def test_create_content(self):\n url = reverse('content-list')\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File',\n 'description': 'File 1',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def file_factory(test_workspace):\n\n return FileCreator(test_workspace)", "def test_download_to_file(req, tmpdir):\n req.get(ENTREZ_URL, text='This works.')\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()", "def file(c, path=local.http_path):\r\n c = conn(c)\r\n print(\"make file repo on {}, path [{}]\".format(c.host, path))\r\n\r\n system.install(c, 'createrepo')\r\n c.run('createrepo {}'.format(path))", "def test_CRUD_ProjectFiles_Successfully(self):\n\n real_file_code = get_content('BasicTemplateAlgorithm.py')\n second_real_file_code = get_content('BasicTemplateForexAlgorithm.py')\n\n fakeFile = {\"name\":\"Hello.py\", \"code\": \"Hello World!\"}\n realFile = {\"name\":\"main.py\", \"code\": real_file_code}\n secondRealFile = {\"name\":\"lol.py\", \"code\": second_real_file_code}\n\n # Create a new project and make sure there are no files\n project = self.api.create_project(\"Test project - \", \"Py\")\n self.assertTrue(project['success'])\n self.assertTrue(project['projects'][0]['projectId'] > 0)\n\n # Add random file\n randomAdd = self.api.add_project_file(project['projects'][0]['projectId'], fakeFile[\"name\"], fakeFile[\"code\"])\n self.assertTrue(randomAdd['success'])\n self.assertTrue(randomAdd['files'][0]['content'] == fakeFile['code'])\n self.assertTrue(randomAdd['files'][0]['name'] == fakeFile['name'])\n\n # Update names of file\n updatedName = self.api.update_project_filename(project['projects'][0]['projectId'], randomAdd['files'][0]['name'], realFile['name'])\n self.assertTrue(updatedName['success'])\n\n # Replace content of file\n updateContents = self.api.update_project_file_content(project['projects'][0]['projectId'], realFile[\"name\"], realFile['code'])\n self.assertTrue(updateContents['success'])\n\n # Read single file\n readFile = self.api.read_project_file(project['projects'][0]['projectId'], realFile['name'])\n self.assertTrue(readFile['success'])\n self.assertTrue(readFile['files'][0]['content'] == realFile['code'])\n self.assertTrue(readFile['files'][0]['name'] == realFile['name'])\n\n # Add a second file\n secondFile = self.api.add_project_file(project['projects'][0]['projectId'], secondRealFile['name'], secondRealFile['code'])\n self.assertTrue(secondFile['success'])\n self.assertTrue(secondFile['files'][0]['content'] == secondRealFile['code'])\n self.assertTrue(secondFile['files'][0]['name'] == secondRealFile['name'])\n\n # Read 
multiple files\n readFiles = self.api.read_project_files(project['projects'][0]['projectId'])\n self.assertTrue(readFiles['success'])\n self.assertTrue(len(readFiles['files']) == 2)\n\n # Delete the second file\n deleteFile = self.api.delete_project_file(project['projects'][0]['projectId'], secondRealFile['name'])\n self.assertTrue(deleteFile['success'])\n\n # Read files\n readFilesAgain = self.api.read_project_files(project['projects'][0]['projectId'])\n self.assertTrue(readFilesAgain['success'])\n self.assertTrue(len(readFilesAgain['files']) == 1)\n self.assertTrue(readFilesAgain['files'][0]['name'] == realFile['name'])\n\n # Delete the project\n deleteProject = self.api.delete_project(project['projects'][0]['projectId'])\n self.assertTrue(deleteProject['success'])", "def writeFile(self,fileLink,fileBuffer,testChars=''):\n # 026 Unit test should test also urllib file like object aside the real file.\n #self.debug.printHeader() # Too many times -- need to move to debuglevel=4\n filePath=fileLink.replace('http://','')\n [fileDir,fileName]=os.path.split(filePath)\n if not os.path.exists(self.pathStorage.workDir()+os.sep+fileDir): os.makedirs(self.pathStorage.workDir()+os.sep+fileDir)\n localFile=file(self.pathStorage.workDir()+os.sep+fileDir+os.sep+fileName,'wb')\n localFile.write(testChars)\n localFile.write(fileBuffer.read())\n localFile.close()", "def commit_test(self, test_case, file_name):\n self.logger.info('found fuzzing target')\n\n case_folder = os.path.join(self.crashes, file_name)\n\n if os.path.exists(case_folder):\n self.logger.error('duplicate case folder')\n sys.exit(1)\n\n os.mkdir(case_folder)\n\n dest = os.path.join(case_folder, 'input')\n with open(dest, 'w+') as f:\n f.write(test_case)", "def test_with_training_file(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n mock_get_ai_details.return_value['training_file'] = 'This is my training file'\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n self.assertContains(response, 'This is my training file')\n self.assertNotContains(response, 'Simply upload historical conversations '\n 'or conversation samples between your users.')", "def fetch_test_feature_file(context, filename):\n resource_package = \"quantarhei\"\n resource_path = '/'.join(('testing', 'resources', 'behave', filename))\n\n content = pkg_resources.resource_string(resource_package, resource_path)\n\n with open(filename, \"w\") as file:\n file.write(content.decode(\"utf-8\"))\n\n context.output = \"\"", "def test_download(self):\n pass", "def create_test(self, test_case, file_name):\n with open(os.path.join(self.tests, file_name), 'w+') as f:\n f.write(test_case)", "def test_open_write(self, client, remote_temp_dir):\n\n file_path = posixpath.join(remote_temp_dir, \"test2.txt\")\n assert not client.exists(file_path)\n\n with HdfsHook() as hook:\n with hook.open(file_path, \"wb\") as file_:\n file_.write(b\"Test file\\n\")\n\n assert client.exists(file_path)", "def test_save_and_add_another_redirects_to_create(self):\n with open(fixture_file, 'rb') as fp:\n params = {\n \"caption\": \"some file\",\n \"publication\": fp,\n \"_addanother\": \"\"\n }\n response = self.client.post(reverse(\"admin2:files_captionedfile_create\"),\n params)\n self.assertTrue(\n CaptionedFile.objects.filter(caption=\"some file\").exists())\n self.assertRedirects(\n response, reverse(\"admin2:files_captionedfile_create\"))", 
"def test_master(self, tmpgitdir, branch):\n with tmpgitdir.join('file_a.txt').open('w') as handle:\n handle.write('first file')\n\n subprocess.check_call(['git', 'checkout', '-b', branch])\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'first'])\n\n assert git_head_ref_name(tmpgitdir) == branch", "def test_upload_file(self):\n\n uploadFile = os.path.join(testdatadir, \"upload.data\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED\n r_content = r.json()\n link = r_content['permalink']\n\n self.assertEqual(\n open(uploadFile, 'r').read(),\n gracedb.get(gracedb.files(eventId).json()['upload.data']).read()\n )\n\n self.assertEqual(\n open(uploadFile, 'r').read(),\n gracedb.get(link).read()\n )\n\n # Re-upload slightly different file.\n uploadFile2 = os.path.join(testdatadir, \"upload2.data\")\n r = gracedb.writeFile(\n eventId,\n filename=\"upload.data\",\n filecontents=open(uploadFile2, 'r'))\n self.assertEqual(r.status, 201) # CREATED\n r_content = r.json()\n link2 = r_content['permalink']\n\n self.assertEqual(\n open(uploadFile2, 'r').read(),\n gracedb.get(gracedb.files(eventId).json()['upload.data']).read()\n )\n\n self.assertEqual(\n open(uploadFile2, 'r').read(),\n gracedb.get(link2).read()\n )\n\n self.assertNotEqual(link, link2)" ]
[ "0.65603554", "0.62640995", "0.62635124", "0.62011635", "0.6008765", "0.5936762", "0.5884765", "0.5857535", "0.5827133", "0.5797445", "0.5793405", "0.5725234", "0.57189417", "0.571022", "0.5687655", "0.5664678", "0.5638524", "0.56348014", "0.5630147", "0.5594338", "0.55792326", "0.55502665", "0.55477107", "0.5520945", "0.54971486", "0.5490254", "0.54767", "0.5470865", "0.54565525", "0.54542285" ]
0.70232326
0
Compare the response (which is a dictionary) to the reference. First, the function retrieves the reference, then filters both ref and resp. Finally, it compares them.
def compare_with_ref(
    self, response, response_checker=default_checker.default_journey_checker
):
    def ref_resp2files(output_file, output_json):
        """
        Create a file for the filtered response and for the filtered reference
        """
        with open(output_file, "w") as reference_text:
            reference_text.write(output_json)

    def print_diff(ref_file, resp_file):
        """
        Print differences between reference and response in console
        """
        # open reference
        with open(ref_file) as reference_text:
            reference = reference_text.readlines()
        # open response
        with open(resp_file) as response_text:
            response = response_text.readlines()

        # Print failed test name
        print_color("\n\n" + str(file_name) + " failed :" + "\n\n", Colors.PINK)

        symbol2color = {"+": Colors.GREEN, "-": Colors.RED}
        for line in difflib.unified_diff(reference, response):
            print_color(line, symbol2color.get(line[0], Colors.DEFAULT))

    # Filtering the answer. (We compare to a reference also filtered with the same filter)
    filtered_response = response_checker.filter(response)

    # Get the reference

    # Create the file name
    filename = self.get_file_name()
    filepath = os.path.join(config["REFERENCE_FILE_PATH"], filename)

    assert os.path.isfile(filepath), "{} is not a file".format(filepath)

    with open(filepath, "r") as f:
        raw_reference = f.read()

    # Transform the string into a dictionary
    dict_ref = json.loads(raw_reference)

    # Get only the full_response part from the ref
    ref_full_response = dict_ref["full_response"]

    # Filtering the reference
    filtered_reference = response_checker.filter(ref_full_response)

    # Compare response and reference
    try:
        response_checker.compare(filtered_response, filtered_reference)
    except AssertionError as e:
        # print the assertion error message
        logging.error("Assertion Error: %s" % str(e))
        # find name of test
        file_name = filename.split("/")[-1]
        file_name = file_name[:-5]

        # create a folder
        dir_path = config["RESPONSE_FILE_PATH"]
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

        # create path to ref and resp
        full_file_name_ref = dir_path + "/reference_" + file_name + ".txt"
        full_file_name_resp = dir_path + "/response_" + file_name + ".txt"

        json_filtered_reference = json.dumps(filtered_reference, indent=4)
        json_filtered_response = json.dumps(filtered_response, indent=4)

        # Save resp and ref as txt files in folder named outputs
        ref_resp2files(full_file_name_ref, json_filtered_reference)
        ref_resp2files(full_file_name_resp, json_filtered_response)

        # Print difference in console
        print_diff(full_file_name_ref, full_file_name_resp)

        raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_original_response_with_copy(context):\n original = context.response.json()\n copy = context.response_copy\n\n def compare_top_level_values():\n # get the list of fields that are JSON values not arrays\n keys = [val for val in original.iterkeys() if not isinstance(original[val], (dict, list, set))]\n assert keys, ('Expected at least 1 field key to compare but got none!')\n logging.debug('List of top tier field keys to compare: %s', keys)\n for key in keys:\n assert original[key] == copy[key]\n logging.debug(\n 'All top level fields in the response copy have the same values as'\n ' in the original response. Here is a list of compared fields:\\n%s',\n ', '.join(keys))\n\n def compare_items():\n original_items = original['items']\n copy_items = copy['items']\n skip = ['title', 'last_activity_date']\n for original_item in original_items:\n # get all item field keys\n keys = [val for val in original_item.iterkeys()]\n # remove the keys that need to be skipped\n keys = [x for x in keys if x not in skip]\n for copy_item in copy_items:\n # find matching items\n if original_item['question_id'] == copy_item['question_id']:\n # compare original an copied items\n for key in keys:\n assert original_item[key] == copy_item[key]\n logging.debug(\n 'All fields in the copied item ID: %s'\n ' have the same values as in in the original items',\n copy_item['question_id'])\n\n compare_top_level_values()\n compare_items()", "def validate_get_response(response, status, count, job_templates, keys=None):\n assert (response[\"status\"]) == status\n json_response = json.loads(response[\"body\"])\n assert (json_response[\"count\"]) == count\n results = json_response[\"results\"]\n for item in results:\n matching_item = find_by_id(item[\"id\"], job_templates)\n if not keys:\n keys = list(matching_item.keys())\n assert sorted(keys) == sorted(list(item.keys()))\n compare(item, matching_item, keys)", "def compare_result_with_reference(parsed_result:dict, parsed_reference:dict, tolerance:dict, debug_mode=False):\n\n result = [{key: value} for key, value in get_hashable_entries(parsed_result)]\n reference = [{key: value} for key, value in get_hashable_entries(parsed_reference)]\n\n if debug_mode:\n check_key_consistency(reference, result)\n\n def key_from_single_entry(a:dict):\n return [x for x in a.keys()][0]\n\n def value_from_single_entry(a:dict):\n return [x for x in a.values()][0]\n\n for i in range(0, len(result)):\n\n result_key = key_from_single_entry(result[i])\n reference_key = key_from_single_entry(reference[i])\n assert result_key == reference_key\n\n result_value = value_from_single_entry(result[i])\n reference_value = value_from_single_entry(reference[i])\n\n assert_equal(result_value, reference_value, tolerance[reference_key], message=reference_key)", "def test_fetch_related_data_valid(self):\n resp = requests.post(\n _CONF[\"re_api_url\"] + \"/api/v1/query_results\",\n params={\"stored_query\": \"ws_fetch_related_data\", \"show_public\": True},\n data=json.dumps({\"obj_key\": \"1:1:1\"}),\n ).json()\n self.assertEqual(resp[\"count\"], 1)\n self.assertEqual(resp[\"has_more\"], False)\n res = resp[\"results\"][0]\n # Check the root object results\n self.assertEqual(res[\"obj\"][\"_key\"], \"1:1:1\")\n self.assertEqual(res[\"obj_type\"][\"_key\"], \"Module.Type1-1.0\")\n # Check the copy results\n self.assertEqual(res[\"copies\"][\"count\"], 1)\n self.assertEqual(len(res[\"copies\"][\"data\"]), 1)\n self.assertEqual(\n res[\"copies\"][\"data\"][0][\"data\"][\"_id\"], \"ws_object_version/1:2:1\"\n 
)\n self.assertEqual(res[\"copies\"][\"data\"][0][\"hops\"], 1)\n self.assertEqual(\n res[\"copies\"][\"data\"][0][\"type\"][\"_id\"], \"ws_type_version/Module.Type1-1.0\"\n )\n # Check the provenance results\n self.assertEqual(res[\"prov\"][\"count\"], 1)\n self.assertEqual(len(res[\"prov\"][\"data\"]), 1)\n self.assertEqual(\n res[\"prov\"][\"data\"][0][\"data\"][\"_id\"], \"ws_object_version/1:3:1\"\n )\n self.assertEqual(res[\"prov\"][\"data\"][0][\"hops\"], 1)\n self.assertEqual(\n res[\"prov\"][\"data\"][0][\"type\"][\"_id\"], \"ws_type_version/Module.Type1-1.0\"\n )\n # Check the ref results\n self.assertEqual(res[\"refs\"][\"count\"], 1)\n self.assertEqual(len(res[\"refs\"][\"data\"]), 1)\n self.assertEqual(\n res[\"refs\"][\"data\"][0][\"data\"][\"_id\"], \"ws_object_version/1:4:1\"\n )\n self.assertEqual(res[\"refs\"][\"data\"][0][\"hops\"], 1)\n self.assertEqual(\n res[\"refs\"][\"data\"][0][\"type\"][\"_id\"], \"ws_type_version/Module.Type1-1.0\"\n )", "def _slack_get_value(slack_response, search_value, search_field, return_field, classifier):\n if not slack_response['ok']:\n return False\n for item in slack_response[classifier]:\n if search_field in item and search_value == item[search_field] and return_field in item:\n return item[return_field]", "def http_get_and_compare_resp(url, expected_get_json_resp, check_util=default_full_compare):\n get_resp_obj = RestClientApis.http_get_and_check_success(url)\n if get_resp_obj.success:\n get_resp_json = get_resp_obj.json_body\n success = check_util(json.loads(expected_get_json_resp), get_resp_json)\n message, return_code = assign_message_code(success)\n else:\n return get_resp_obj\n\n rest_return_obj = RestReturn(success=success, message=message, http_status=return_code,\n json_body=get_resp_json,\n response_object=get_resp_obj.response_object)\n return rest_return_obj", "def filter_pro_matches(resp):\n\n return [x for x in resp if x[\"dire_name\"] and x[\"radiant_name\"]]", "def fusion_api_validate_response(self, respDict, valDict):\n success = True\n returnDict = {}\n keys = []\n for key in valDict:\n if not valDict[key]:\n continue\n # logger._log_to_console_and_log_file('key: %s' % (key))\n keyDict = {'key': key, 'expected': valDict[\n key], 'actual': respDict[key], 'success': True}\n if key in respDict:\n pattern = re.compile(str(valDict[key]))\n # if not re.search(str(valDict[key]), str(respDict[key])):\n # t = re.compile('(?i)Warning|Unknown|Terminated|Killed|Error|Completed')\n\n if not re.search(pattern, str(respDict[key])):\n\n success = False\n keyDict['success'] = False\n else:\n success = False\n keyDict['success'] = False\n keys.append(keyDict)\n\n returnDict['success'] = success\n returnDict['keys'] = keys\n return returnDict", "def compare():\n body: t.Any = request.json\n check_error({'input': {'old': {}, 'new': {}}}, body)\n response_new = rpc_search({'input': body['input']['new']})\n response_old = rpc_search({'input': body['input']['old']})\n\n modules_new = response_new['yang-catalog:modules']['module']\n modules_old = response_old['yang-catalog:modules']['module']\n\n if len(modules_new) == 0 or len(modules_old) == 0:\n abort(404, description='No hits found either in old or new input')\n\n new_mods = []\n for mod_new in modules_new:\n new_rev = mod_new['revision']\n new_name = mod_new['name']\n found = False\n new_rev_found = False\n for mod_old in modules_old:\n old_rev = mod_old['revision']\n old_name = mod_old['name']\n if new_name == old_name and new_rev == old_rev:\n found = True\n break\n if new_name == 
old_name and new_rev != old_rev:\n new_rev_found = True\n if not found:\n mod_new['reason-to-show'] = 'New module'\n new_mods.append(mod_new)\n if new_rev_found:\n mod_new['reason-to-show'] = 'Different revision'\n new_mods.append(mod_new)\n if len(new_mods) == 0:\n abort(404, description='No new modules or modules with different revisions found')\n output = {'output': new_mods}\n return output", "def compare(self, base, head):\r\n url = '{0}/compare/{1}...{2}'.format(self.parent.get_url(), base, head)\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def _referencedChecker(self, entity, params):\n\n if 'ref_logic' not in params:\n return False\n\n logic = self.helper.getLogicForItem(params, 'ref_logic')\n filter = {\n params['ref_field']: entity.key()\n }\n ref_entity = logic.getForFields(filter=filter, unique=True)\n\n result = ref_entity is not None\n\n no_ref = params.get('no_ref')\n if no_ref:\n result = not result\n\n return result", "def _matcher(r1: vcr.request.Request, r2: vcr.request.Request) -> None:\n assert r1.uri == r2.uri and r1.body == r2.body and r1.headers == r2.headers", "def _check_response(self, response_contents, correct_jsons):\r\n for username, content in response_contents.items():\r\n\r\n # Used in debugger for comparing objects.\r\n # self.maxDiff = None\r\n\r\n # We should compare top_words for manually,\r\n # because they are unsorted.\r\n keys_to_compare = set(content.keys()).difference(set(['top_words']))\r\n self.assertDictEqual(\r\n {k: content[k] for k in keys_to_compare},\r\n {k: correct_jsons[username][k] for k in keys_to_compare})\r\n\r\n # comparing top_words:\r\n top_words_content = sorted(\r\n content['top_words'],\r\n key=itemgetter('text')\r\n )\r\n top_words_correct = sorted(\r\n correct_jsons[username]['top_words'],\r\n key=itemgetter('text')\r\n )\r\n self.assertListEqual(top_words_content, top_words_correct)", "def _test_single_prerecorded_api_call(app, path, prerecorded, contexts={}):\n rv = app.get(path)\n assert rv.status_code == 200\n response = json.loads(rv.get_data().decode('utf8'))\n if type(prerecorded) is list:\n response = response['items']\n compare_objects(contexts, '', prerecorded, response)\n return False", "def _filter_entries_by_response(self, urls, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n if len(har[\"log\"][\"entries\"]) > 1:\r\n for entry in har[\"log\"][\"entries\"]:\r\n for url in urls:\r\n if url in entry[\"request\"][\"url\"]:\r\n tempObject = {}\r\n if entry[\"response\"][\"status\"] == 200 and entry[\"response\"][\"content\"].get(\"text\") and entry[\"response\"][\"content\"][\"text\"] != \"\":\r\n tempObject['url'] = entry[\"request\"][\"url\"]\r\n tempObject['response'] = entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore')\r\n matches.append(tempObject)\r\n return matches", "def check_recommendation_in_result(context):\n json_data = context.response.json()\n result = json_data[\"recommendation\"]\n assert result == {}", "def verify(self, response):", "def references(md5):\n u = Upload.objects.filter(md5=md5).first()\n if not u:\n abort(404)\n # first, is this searchable?\n is_searchable = False\n count = elastic.count('page', filter={'md5': md5})\n if count > 0:\n is_searchable = True\n #annotations = Reference.objects.filter(upload=u, ref_url__exists=True)\n annotations = Reference.objects.filter(upload=u).order_by('ref_pos')\n # create a list of referenced things\n references = {'references':[], 'searchable': is_searchable}\n for a in annotations:\n try:\n 
references['references'].append({\n 'pos_x': a.pos_x, \n 'pos': a.pos, \n 'ref': a.ref_upload.md5, \n 'ref_pos': a.ref_pos\n })\n except:\n pass\n return jsonify(references)", "def get_matching_citizens():\n try:\n volunteer = request.headers.get('X-volunteer')\n except:\n return jsonify(\"X-volunteer header is missing\")\n logger.info(\"X-volunteer header is missing\")\n \n vaibhav_interests = ['sleeping','home building','garden walks']\n arsalan_interests = ['music','politics','science','reading']\n senior_list = table.scan()[\"Items\"]\n if request.headers['X-volunteer'] == \"Vaibhav\":\n dummy_volunteer_interest_list = vaibhav_interests\n matching_list = []\n for senior in senior_list:\n match = len(set(dummy_volunteer_interest_list) & set(senior['interests'])) / float(len(set(dummy_volunteer_interest_list) | set(senior['interests']))) * 100\n if match >= 20:\n matching_list.append(senior)\n if len(matching_list) == 0:\n return(jsonify(\"No matches found!\"))\n logger.info(\"Vaibhav Matching citizens returned\")\n elif request.headers['X-volunteer'] == \"Arsalan\":\n dummy_volunteer_interest_list = arsalan_interests\n matching_list = []\n # senior_list = [post for post in posts.find()]\n for senior in senior_list:\n match = len(set(dummy_volunteer_interest_list) & set(senior['interests'])) / float(len(set(dummy_volunteer_interest_list) | set(senior['interests']))) * 100\n if match >= 20:\n matching_list.append(senior)\n if len(matching_list) == 0:\n return jsonify(\"No matches found!\")\n logger.info(\"Arsalan Matching citizens returned\")\n else:\n return jsonify(\"Send a valid user header!\")\n return jsonify(matching_list)", "def test_get_data_success(monkeypatch):\n\n class MockResponse(object):\n def __init__(self):\n self.status_code = 200\n\n def json(self):\n return {\n \"continue\": {\"excontinue\": 1, \"continue\": \"||info\"},\n \"query\": {\n \"pages\": {\n \"151688\": {\n \"pageid\": 151688,\n \"ns\": 0,\n \"title\": \"Naantali\",\n \"index\": -1,\n \"extract\": \"Naantali (en suédois Nådendal, en latin Vallis Gratiae - la vallée de grâce) est une ville du sud-ouest de la Finlande. 
Cette petite ville, qui compte une population de 19 000 habitants, se situe dans la province de Finlande occidentale et la région de Finlande du Sud-Ouest, à 15 km à l'ouest de Turku, la capitale provinciale.\", # noqa: E501\n \"contentmodel\": \"wikitext\",\n \"pagelanguage\": \"fr\",\n \"pagelanguagehtmlcode\": \"fr\",\n \"pagelanguagedir\": \"ltr\",\n \"touched\": \"2020-10-08T00:35:54Z\",\n \"lastrevid\": 169716755,\n \"length\": 14393,\n \"fullurl\": \"https://fr.wikipedia.org/wiki/Naantali\", # noqa: E501\n \"editurl\": \"https://fr.wikipedia.org/w/index.php?title=Naantali&action=edit\", # noqa: E501\n \"canonicalurl\": \"https://fr.wikipedia.org/wiki/Naantali\", # noqa: E501\n },\n \"2709037\": {\n \"pageid\": 2709037,\n \"ns\": 0,\n \"title\": \"Muumimaailma\",\n \"index\": 0,\n \"contentmodel\": \"wikitext\",\n \"pagelanguage\": \"fr\",\n \"pagelanguagehtmlcode\": \"fr\",\n \"pagelanguagedir\": \"ltr\",\n \"touched\": \"2020-10-04T16:19:49Z\",\n \"lastrevid\": 168229306,\n \"length\": 1447,\n \"fullurl\": \"https://fr.wikipedia.org/wiki/Muumimaailma\", # noqa: E501\n \"editurl\": \"https://fr.wikipedia.org/w/index.php?title=Muumimaailma&action=edit\", # noqa: E501\n \"canonicalurl\": \"https://fr.wikipedia.org/wiki/Muumimaailma\", # noqa: E501\n },\n \"5751499\": {\n \"pageid\": 5751499,\n \"ns\": 0,\n \"title\": \"Kultaranta\",\n \"index\": 1,\n \"contentmodel\": \"wikitext\",\n \"pagelanguage\": \"fr\",\n \"pagelanguagehtmlcode\": \"fr\",\n \"pagelanguagedir\": \"ltr\",\n \"touched\": \"2020-10-08T00:43:32Z\",\n \"lastrevid\": 164009230,\n \"length\": 11889,\n \"fullurl\": \"https://fr.wikipedia.org/wiki/Kultaranta\", # noqa: E501\n \"editurl\": \"https://fr.wikipedia.org/w/index.php?title=Kultaranta&action=edit\", # noqa: E501\n \"canonicalurl\": \"https://fr.wikipedia.org/wiki/Kultaranta\", # noqa: E501\n },\n \"7700543\": {\n \"pageid\": 7700543,\n \"ns\": 0,\n \"title\": \"Port de Naantali\",\n \"index\": 2,\n \"contentmodel\": \"wikitext\",\n \"pagelanguage\": \"fr\",\n \"pagelanguagehtmlcode\": \"fr\",\n \"pagelanguagedir\": \"ltr\",\n \"touched\": \"2020-10-04T16:25:50Z\",\n \"lastrevid\": 162923416,\n \"length\": 2675,\n \"fullurl\": \"https://fr.wikipedia.org/wiki/Port_de_Naantali\", # noqa: E501\n \"editurl\": \"https://fr.wikipedia.org/w/index.php?title=Port_de_Naantali&action=edit\", # noqa: E501\n \"canonicalurl\": \"https://fr.wikipedia.org/wiki/Port_de_Naantali\", # noqa: E501\n },\n }\n },\n }\n\n parameters = {\n \"action\": \"query\",\n \"prop\": \"extracts|info\",\n \"inprop\": \"url\",\n \"explaintext\": True,\n \"exsentences\": 2,\n \"exlimit\": 1,\n \"generator\": \"geosearch\",\n \"ggsradius\": 10000,\n \"ggscoord\": f\"{00000}|{00000}\",\n \"format\": \"json\",\n }\n\n headers = {\n \"date\": \"10/10/2020\",\n \"user-agent\": '\"MoominPappaBot/fake_version',\n }\n\n def mock_get(url, params=parameters, headers=headers, timeout=10):\n return MockResponse()\n\n monkeypatch.setattr(requests, \"get\", mock_get)\n\n request = MediawikiApi(\"user_input1\", \"user_input2\")\n result = request.get_data()\n expected_result = {\n \"title\": \"Naantali\",\n \"extract\": \"Naantali (en suédois Nådendal, en latin Vallis Gratiae - la vallée de grâce) est une ville du sud-ouest de la Finlande. 
Cette petite ville, qui compte une population de 19 000 habitants, se situe dans la province de Finlande occidentale et la région de Finlande du Sud-Ouest, à 15 km à l'ouest de Turku, la capitale provinciale.\", # noqa: E501\n \"fullurl\": \"https://fr.wikipedia.org/wiki/Naantali\",\n }\n assert result == expected_result", "def test_list_referrals_by_desc_object(self):\n user = factories.UserFactory()\n referrals = [\n factories.ReferralFactory(\n state=models.ReferralState.RECEIVED,\n object=\"First by alphabetical order\",\n post__users=[user],\n urgency_level=models.ReferralUrgency.objects.get(\n duration=timedelta(days=1)\n ),\n ),\n factories.ReferralFactory(\n state=models.ReferralState.RECEIVED,\n object=\"Second by alphabetical order\",\n post__users=[user],\n urgency_level=models.ReferralUrgency.objects.get(\n duration=timedelta(days=1)\n ),\n ),\n ]\n\n self.setup_elasticsearch()\n response = self.client.get(\n f\"/api/referrallites/?user={user.id}&sort=object.keyword&sort_dir=desc\",\n HTTP_AUTHORIZATION=f\"Token {Token.objects.get_or_create(user=user)[0]}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"count\"], 2)\n self.assertEqual(response.json()[\"results\"][0][\"id\"], referrals[1].id)\n self.assertEqual(response.json()[\"results\"][1][\"id\"], referrals[0].id)", "def rpc_match():", "def pare_dict(d, ref, strict_b=False, **kw):\n strict_b = kw.get(\"strict\", strict_b)\n if strict_b:\n return {k: v for k, v in d.items() if k in ref and v != ref.get(k)}\n return {k: v for k, v in d.items() if k not in ref or v != ref.get(k)}", "def compare_response_to_model_instance(self, response, model_instance):\n parsed_response = json_decode(response)\n headers = parsed_response['headers']\n data = parsed_response['data']\n self.assertEquals(len(data), len(model_instance))\n for i in range(len(data)):\n datum = self.deserialize(headers, data[i])\n self.compare_model_instance(datum, model_instance[i])", "def compare_promises(fi, se, en, ref) -> bool:\n same = False\n if (fi == ref):\n return True\n if (fi != None):\n if (fi == ref) or (format_promise(fi) == format_promise(ref)):\n return True\n if (se == ref):\n return True\n if (en == ref):\n return True\n if (se != None):\n if (se == ref) or (format_promise(se) == format_promise(ref)):\n return True\n return same", "def compareTwoReco(reference, new, histos, debug=1):\n\n # Tracks with index False are the ones that have been matched to the reference track collection\n new_valid = [True for i in new]\n\n # Tracks with index False are the ones that have been matched to the comparison track collection\n original_valid = [True for i in reference]\n print \" \".join(\"%10s\" % k for k in variables)\n debug_verbose = checkDebug(debug, 'Verbose')\n debug_ordinary = checkDebug(debug, 'Ordinary')\n debug_recovery = checkDebug(debug, 'Recovery')\n debug_lost = checkDebug(debug, 'Lost')\n debug_fake = checkDebug(debug, 'Fake')\n\n for original_index, original in enumerate(reference):\n # Fill in cumulative plots for the reference sample first\n histos['reference_hits_vs_algo'].Fill(original.algo, original.hits)\n histos['reference_hits_vs_orialgo'].Fill(original.orialgo, original.hits)\n histos['reference_hits_vs_pt'].Fill(original.pt, original.hits)\n histos['den'].Fill(original.pt)\n histos['den_eta'].Fill(original.eta)\n histos['den_phi'].Fill(original.phi)\n histos['den_hits'].Fill(original.hits)\n histos['den_algo'].Fill(original.algo)\n histos['den_orialgo'].Fill(original.orialgo)\n\n # Now start 
to look for a matching track in the comparison track collection\n window_depth = 400 # elements to span to look for best candidate\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = -1, 100, 100\n if debug_verbose:\n print original\n for i,j in enumerate(new):\n if new_valid[i] == True:\n if debug_verbose:\n print \" \", i, j\n if window_depth == 0:\n break\n dr_squared, dPt_over_pt = match(original, j)\n if dr_squared < bestDeltaRMatch*bestDeltaRMatch and dPt_over_pt < DELTA_PT_OVER_PT_CUT:\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = i, dr_squared, dPt_over_pt\n if debug_verbose:\n print \" \", window_depth, iBest, bestDeltaRMatch, dr_squared, bestDeltaPt_over_PtMatch, dPt_over_pt\n if bestDeltaRMatch <= 0.0001 or bestDeltaPt_over_PtMatch == 0.0001:\n break\n window_depth -= 1\n if iBest != -1 and bestDeltaRMatch < DELTA_R_CUT:\n # These are the tracks in the reference track collection\n # that have been matched to a track in the comparison\n # track collection\n new_valid[iBest] = False\n original_valid[original_index] = False\n assert original.run == new[iBest].run, \"run mismatch\"\n assert original.ls == new[iBest].ls, \"ls mismatch\"\n assert original.event == new[iBest].event, \"event mismatch\"\n if debug_ordinary:\n print original\n print new[iBest]\n print iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch, '\\n'\n histos['num'].Fill(original.pt)\n histos['num_eta'].Fill(original.eta)\n histos['num_phi'].Fill(original.phi)\n histos['num_hits'].Fill(original.hits)\n histos['num_algo'].Fill(original.algo)\n histos['num_orialgo'].Fill(original.orialgo)\n histos['fake_num'].Fill(new[iBest].pt)\n histos['fake_num_eta'].Fill(new[iBest].eta)\n histos['fake_num_phi'].Fill(new[iBest].phi)\n histos['fake_num_hits'].Fill(new[iBest].hits)\n histos['fake_num_algo'].Fill(new[iBest].algo)\n histos['fake_num_orialgo'].Fill(new[iBest].orialgo)\n histos['comparison_algo_vs_reference_algo'].Fill(original.algo, new[iBest].algo)\n histos['comparison_orialgo_vs_reference_orialgo'].Fill(original.orialgo, new[iBest].orialgo)\n histos['comparison_hits_vs_reference_hits'].Fill(original.hits, new[iBest].hits)\n\n # Let's try a recovery loop with somewhat lesser stringent cuts\n for original_index, original in enumerate(reference):\n if original_valid[original_index]:\n # Now start to look for a matching track in the comparison track collection\n window_depth = 300 # elements to span to look for best candidate\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = -1, 100, 100\n if debug_verbose:\n print \"Recovery \", original\n for i,j in enumerate(new):\n if new_valid[i] == True:\n if debug_verbose:\n print \"Recovery \", i, j\n if window_depth == 0:\n break\n dr_squared, dPt_over_pt = match(original, j)\n if dr_squared < bestDeltaRMatch*bestDeltaRMatch and dPt_over_pt < DELTA_PT_OVER_PT_CUT*6:\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = i, dr_squared, dPt_over_pt\n if debug_verbose:\n print \"Recovery \", window_depth, iBest, bestDeltaRMatch, dr_squared, bestDeltaPt_over_PtMatch, dPt_over_pt\n if bestDeltaRMatch <= 0.0001 or bestDeltaPt_over_PtMatch == 0.0001:\n break\n window_depth -= 1\n if iBest != -1 and bestDeltaRMatch < DELTA_R_CUT*10: # inflate cut on DeltaR to recover some good-medium matching\n # These are the tracks in the reference track collection\n # that have been matched to a track in the comparison\n # track collection\n new_valid[iBest] = False\n original_valid[original_index] = False\n if debug_recovery:\n print \"Recovery \", original\n print \"Recovery \", 
new[iBest]\n print \"Recovery \", iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch\n histos['num'].Fill(original.pt)\n histos['num_eta'].Fill(original.eta)\n histos['num_phi'].Fill(original.phi)\n histos['num_hits'].Fill(original.hits)\n histos['num_algo'].Fill(original.algo)\n histos['num_orialgo'].Fill(original.orialgo)\n histos['fake_num'].Fill(new[iBest].pt)\n histos['fake_num_eta'].Fill(new[iBest].eta)\n histos['fake_num_hits'].Fill(new[iBest].hits)\n histos['fake_num_algo'].Fill(new[iBest].algo)\n histos['fake_num_orialgo'].Fill(new[iBest].orialgo)\n histos['comparison_algo_vs_reference_algo'].Fill(original.algo, new[iBest].algo)\n histos['comparison_orialgo_vs_reference_orialgo'].Fill(original.orialgo, new[iBest].orialgo)\n histos['comparison_hits_vs_reference_hits'].Fill(original.hits, new[iBest].hits)\n\n\n # These are the tracks in the reference track collection\n # that have *not* been associated to any track in the\n # comparison collection == > LOST TRACKS\n reference_not_assigned = [j for i,j in enumerate(reference) if original_valid[i]]\n reference_not_assigned.sort(key=lambda tr: tr.algo)\n if debug_lost:\n print \"**** Lost tracks **** %d\" % len(reference_not_assigned)\n for j in reference_not_assigned:\n histos['lost_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['lost_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['lost_hits_vs_pt'].Fill(j.pt, j.hits)\n histos['lost_eta'].Fill(j.eta)\n if debug:\n print j\n if debug_lost:\n print \"**** End of Lost tracks ****\"\n\n # Fake Tracks\n for i, j in enumerate(new):\n # Fill in the cumulative plots related to tracks in the comparison track collection\n histos['comparison_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['comparison_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['comparison_hits_vs_pt'].Fill(j.pt, j.hits)\n histos['fake_den'].Fill(j.pt)\n histos['fake_den_eta'].Fill(j.eta)\n histos['fake_den_phi'].Fill(j.phi)\n histos['fake_den_hits'].Fill(j.hits)\n histos['fake_den_algo'].Fill(j.algo)\n histos['fake_den_orialgo'].Fill(j.orialgo)\n\n # These are the tracks in the comparison track collection\n # that have *not* been associated to any track in the\n # reference collection ==> FAKE TRACKS\n new_not_assigned = [j for i,j in enumerate(new) if new_valid[i]]\n new_not_assigned.sort(key=lambda tr: tr.algo)\n if debug_fake:\n print \"**** Fake tracks **** %d\" % len(new_not_assigned)\n for j in new_not_assigned:\n histos['fake_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['fake_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['fake_hits_vs_pt'].Fill(j.pt, j.hits)\n if debug:\n print j\n if debug_fake:\n print \"**** End of Fake tracks ****\"", "def test_get_with_filter_person_factoid(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062&p=P00063\")\n assert r.status_code == 200\n assert r.json[\"statements\"][0][\"@id\"] == \"Stmt00184\"\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062&p=P00064\")\n assert r.status_code == 404", "def compare(self, **kwargs):\n\n source_params = {'sid': kwargs.get('source_sid'),\n 'did': kwargs.get('source_did'),\n 'scid': kwargs.get('source_scid')\n }\n\n target_params = {'sid': kwargs.get('target_sid'),\n 'did': kwargs.get('target_did'),\n 'scid': kwargs.get('target_scid')\n }\n\n if 'source_tid' in kwargs:\n source_params['tid'] = kwargs['source_tid']\n if 'target_tid' in kwargs:\n target_params['tid'] = kwargs['target_tid']\n\n source = self.fetch_objects_to_compare(**source_params)\n\n target = self.fetch_objects_to_compare(**target_params)\n\n # If 
both the dict have no items then return None.\n if not (source or target) or (\n len(source) <= 0 and len(target) <= 0):\n return None\n\n return compare_dictionaries(source, target,\n self.node_type,\n self.blueprint.COLLECTION_LABEL,\n self.keys_to_ignore)", "def isResp(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '76270-8'", "def __eq__(self, other):\n if not isinstance(other, ClientDetailResponseResponse):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.6025328", "0.57968277", "0.57027745", "0.56979775", "0.5373415", "0.532791", "0.5307729", "0.52962", "0.52779275", "0.5272271", "0.51982015", "0.5154221", "0.514683", "0.5144544", "0.51169497", "0.5078921", "0.5072906", "0.5058429", "0.5058264", "0.505702", "0.5056039", "0.5030337", "0.50278306", "0.5023779", "0.49947494", "0.4987356", "0.49872717", "0.4980991", "0.49746358", "0.49706483" ]
0.7237854
0
Create a file for the filtered response and for the filtered reference
def ref_resp2files(output_file, output_json):
    with open(output_file, "w") as reference_text:
        reference_text.write(output_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def create_reference(\n self, response_checker=default_checker.default_journey_checker\n ):\n # Check that the file doesn't already exist\n filename = self.get_file_name()\n filepath = os.path.join(config[\"REFERENCE_FILE_PATH\"], filename)\n\n if os.path.isfile(filepath):\n logger.warning(\n \"NO REF FILE CREATED - {} is already present\".format(filepath)\n )\n else:\n # Concatenate reference file info\n reference_text = OrderedDict()\n reference_text[\"query\"] = self.query.replace(\n config[\"URL_JORMUN\"][7:], \"localhost\"\n )\n logger.warning(\"Query: {}\".format(self.query))\n reference_text[\"response\"] = response_checker.filter(\n json.loads(self.full_resp)\n )\n reference_text[\"full_response\"] = json.loads(\n self.full_resp.replace(config[\"URL_JORMUN\"][7:], \"localhost\")\n )\n\n # Write reference file directly in the references folder\n with open(filepath, \"w\") as ref:\n ref.write(json.dumps(reference_text, indent=4))\n logger.info(\"Created reference file : {}\".format(filepath))", "def create_response_info(self, response):\n output_path = os.path.join(self.output_folder, self.file_name)\n output_path += \".response.txt\"\n with open(output_path, 'w') as file:\n file.write(json.dumps(response))", "def create_file_output(self, results):\n for key, value in results.table_output.items():\n name_timestamp = key.split('&')\n _name = name_timestamp[0]\n timestamp = name_timestamp[1]\n file_name = output_file_prefix + \"-\" + _name + \".csv\"\n if file_name not in self.file_creation_set:\n self._header_written = False\n self.file_creation_set.update([file_name])\n for row in value:\n with open(file_name, 'a+') as file_to_write:\n row.update({'Timestamp': timestamp})\n _keys = row.keys()\n file_output = csv.DictWriter(file_to_write, _keys)\n if not self._header_written:\n file_output.writeheader()\n self._header_written = True\n file_output.writerow(row)\n file_to_write.close()\n return results", "def create_filtered_network_file(network_file_prefix, filtered_network_file, ueids):\n network_file_method_attribute = network_file_prefix + \"_method_id.eda\"\n network_file_source_attribute = network_file_prefix + \"_source.eda\"\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_y2h.sif\", interaction_type=\"y2h\")\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_tap.sif\", interaction_type=\"tap\")\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_no_tap.sif\", interaction_type=\"tap\", reverse_selection=True)\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = filtered_network_file + \".no_tap\", interaction_type=\"tap\", reverse_selection=True)\n valid_ids = set([0,4,96,676,729,19,6,7,858,59,109]) # TAP\n biana_output_converter.filter_network_by_interaction_attribute_value(network_attribute_file_name = network_file_method_attribute, network_out_file_name = filtered_network_file + \".no_tap\", 
accept_attribute_value = lambda x: int(x) not in valid_ids)\n\n #interaction_to_sources = get_interaction_sources(network_file_source_attribute)\n with open(filtered_network_file, 'w') as f:\n for line in open(filtered_network_file + \".no_tap\"):\n id1, dummy, id2 = line.split()\n # Filter self interactions\n if id1 == id2:\n continue\n # Remove singleton interacions (that has evidence only from one database)\n #id_pair = sorted([id1, id2])\n #if is_singleton(interaction_to_sources[(id_pair[0], id_pair[1])]):\n # continue\n # Do not include ambigous user entities\n if id1 in ueids and id2 in ueids:\n f.write(line)\n return", "def save_response(response, file_name, path='~/tmp/fcb-analyzer'):\n \n path = ensure_path(path)\n f = open(path + '/' + file_name, 'w')\n f.write(response.text)", "def create_file(self):\n for data_element in self.data:\n title = data_element['title']\n anchor = data_element['href']\n example = data_element['example']\n content = data_element['content']\n if example:\n abstract = '<section class=\"prog__container\">{}<br>{}</section>'.format(content, example)\n\n list_of_data = [\n title, # api title\n 'A', # type is article\n '', # no redirect data\n '', # ignore\n '', # no categories\n '', # ignore\n '', # no related topics\n '', # ignore\n '', # no external link\n '', # no disambiguation\n '', # images\n abstract, # abstract\n anchor # url to doc\n ]\n self.output_file.write('{}\\n'.format('\\t'.join(list_of_data)))", "def create_exclusions_file(output_file: str, verbosity: int) -> None:\n set_log_level(verbosity)\n\n with open(output_file, \"a\") as file_obj:\n for line in EXCLUSIONS_TEMPLATE:\n file_obj.write(line)\n utils.print_green(f\"Success! Exclusions template file written to: {output_file}\")\n print(\n \"Make sure you download your account authorization details before running the scan.\"\n \"Set your AWS access keys as environment variables then run: \"\n )\n print(\"\\tcloudsplaining download\")\n print(\"You can use this with the scan command as shown below: \")\n print(\n \"\\tcloudsplaining scan --exclusions-file exclusions.yml --input-file default.json\"\n )", "def write_filter_spec(filters, filename):\n data = export_filters(filters)\n with open(filename, 'w') as fp:\n json.dump(data, fp, indent = 4)", "def process_output_file_write(output_file, response):\n\n with open(output_file, \"w\") as output_file:\n output_file.write(response)", "def _toFile(self):\n pass", "def _get_file_objects(self, build_results=True):\n file_obj = None\n writer = None\n if self.filename is not None:\n file_obj = open(self.filename, \"w\")\n writer = csv.writer(file_obj, lineterminator=\"\\n\")\n\n header = [\n \"Interaction index\",\n \"Player index\",\n \"Opponent index\",\n \"Repetition\",\n \"Player name\",\n \"Opponent name\",\n \"Actions\",\n ]\n if build_results:\n header.extend(\n [\n \"Score\",\n \"Score difference\",\n \"Turns\",\n \"Score per turn\",\n \"Score difference per turn\",\n \"Win\",\n \"Initial cooperation\",\n \"Cooperation count\",\n \"CC count\",\n \"CD count\",\n \"DC count\",\n \"DD count\",\n \"CC to C count\",\n \"CC to D count\",\n \"CD to C count\",\n \"CD to D count\",\n \"DC to C count\",\n \"DC to D count\",\n \"DD to C count\",\n \"DD to D count\",\n \"Good partner\",\n ]\n )\n\n writer.writerow(header)\n return file_obj, writer", "def autoSaveFilter(filename):", "def save_file(E_Filtered, output_path):\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n with open(output_path, 'w+') as f:\n for k, v in E_Filtered.items():\n 
f.write(\"%s\\t%s\\n\" % (list(k), v))", "def CreateOutFile(pdb_file): \n if(pdb_file[-4:] == '.pdb'):\n OutFile = open(pdb_file[:-4].replace('../','')+'_PredictedSites.xyz', 'w') #overwrite file if already present\n elif(pdb_file[-3:] == '.gz'):\n OutFile = open(pdb_file[:-7].replace('../','')+'_PredictedSites.xyz', 'w') #overwrite file if already present\n else:\n OutFile = open(pdb_file.replace('../','')+'_PredictedSites.xyz', 'w') #overwrite file if already present", "def render_to_file(properties,file):\n properties['tempfile']=None\n properties['remove_temp']=True\n properties['outfile']=file", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def _create_file(content=''):\r\n sjson_file = tempfile.NamedTemporaryFile(prefix=\"subs_\", suffix=\".srt.sjson\")\r\n sjson_file.content_type = 'application/json'\r\n sjson_file.write(textwrap.dedent(content))\r\n sjson_file.seek(0)\r\n return sjson_file", "def tofile(self, f):\n raise NotImplementedError(\"ScalableRedisLocalBloomFilter not support tofile\")", "def test_output_source_file(self):\n response = self.client.open(\n '/v1/control/file/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def writeResponse(response):", "def createMetadata(request, datafile):\n samples = []\n datafile = datafile.split(',')\n for f in datafile:\n filename = f.replace('[', '').replace(']', '').replace('\"', '').replace(' ', '')\n cont = subprocess.Popen(\n [\"curl -u \" + request.session.get('username') + \":\" + request.session.get('password') + \" -k -s \" + filename[1:]],\n stdout=subprocess.PIPE, shell=True).communicate()[0]\n with open(request.session.get('username') + \"/data.txt\", \"w\") as datafile:\n datafile.write(cont)\n with open(datafile.name, \"r\") as tfile:\n for line in tfile:\n if \"!Sample_geo_accession\" in line:\n line = line.split('\\t')\n for x in range(0, len(line)):\n samples.append(line[x].replace('\\n', ''))\n samples = filter(None, samples)\n tfile.seek(0)\n with open(request.session.get('username') + \"/meta.txt\", \"w\") as meta:\n for i in range(0, len(samples)):\n for line in tfile:\n if \"!Sample\" in line:\n line = line.split('\\t')\n line[i] = line[i].replace(\"!Sample_\", \"\").replace(\"\\n\", \"\").replace(\"'\", \"\").replace(\",\", \"\").replace(\"\\\"\", \"\")\n if line[i] == \"geo_accession\":\n line[i] = \"sample_id\"\n elif line[1] == \"\\\"female\\\"\" or line[1] == \"\\\"male\\\"\":\n line[0] = \"sex\"\n if \"title\" not in line[0]:\n meta.write(re.sub(r'[^\\x00-\\x7F]+', ' ', line[i]) + '\\t')\n meta.write('\\n')\n tfile.seek(0)\n meta.close()\n datafile.close()\n call([\"rm\", 
request.session.get('username') + \"/data.txt\"])\n return meta", "def download_pickle(self, filename, context=None, filter=[]):\n request = context.REQUEST\n RESPONSE = request.RESPONSE\n RESPONSE.setHeader('Content-Type', 'text/plain; charset=utf-8')\n RESPONSE.setHeader('Content-Disposition', 'attachment; filename=%s' % filename)\n return self.to_pickle(filter=filter)", "def __save_response(self, method, extras, data):\n\n import os, re\n to = \"/tmp/lex/\"\n if not os.path.exists(to):\n os.mkdir(to)\n\n removeables = re.compile('[/&?:]')\n filename = method + '-' + '_'.join(\"%s=%s\" % kv for kv in extras.iteritems())\n filename = os.path.join(to, removeables.sub('_', filename))\n with open(filename, 'w') as f:\n f.write(data)", "def pipe_to_file(response, path):\n # TODO: Indicate progress.\n with open(path, 'wb') as file:\n while True:\n chunk = response.read(4096)\n if not chunk:\n break\n file.write(chunk)", "def write_to_vcf(self):\n\n # 1. Generate header info\n date_for_vcf = datetime.now().strftime('%Y%m%d')\n header_info = [\n '##fileformat=VCFv4.2',\n '##fileDate=%s' % date_for_vcf,\n '##source=%s' % self.get_analyser_name(),\n '##reference=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz',\n '##contig=<ID=chr1,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr1.fa.gz>',\n '##contig=<ID=chr2,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr2.fa.gz>',\n '##contig=<ID=chr3,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr3.fa.gz>',\n '##contig=<ID=chr4,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr4.fa.gz>',\n '##contig=<ID=chr5,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr5.fa.gz>',\n '##contig=<ID=chr6,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr6.fa.gz>',\n '##contig=<ID=chr7,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr7.fa.gz>',\n '##contig=<ID=chr8,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr8.fa.gz>',\n '##contig=<ID=chr9,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr9.fa.gz>',\n '##contig=<ID=chr10,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr10.fa.gz>',\n '##contig=<ID=chr11,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr11.fa.gz>',\n '##contig=<ID=chr12,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr12.fa.gz>',\n '##contig=<ID=chr13,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr13.fa.gz>',\n '##contig=<ID=chr14,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr14.fa.gz>',\n '##contig=<ID=chr15,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr15.fa.gz>',\n '##contig=<ID=chr16,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr16.fa.gz>',\n '##contig=<ID=chr17,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr17.fa.gz>',\n '##contig=<ID=chr18,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr18.fa.gz>',\n '##contig=<ID=chr19,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr19.fa.gz>',\n '##contig=<ID=chr20,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr20.fa.gz>',\n '##contig=<ID=chr21,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr21.fa.gz>',\n '##contig=<ID=chr22,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz>',\n '##contig=<ID=chrM,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrM.fa.gz>',\n 
'##contig=<ID=chrX,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrX.fa.gz>',\n '##contig=<ID=chrY,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrY.fa.gz>',\n ]\n header_parameters = [\n '##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">',\n '##FORMAT=<ID=MTQ,Number=1,Type=String,Description=\"MassArray Typer quality value for SNP call. '\n 'A=Conservative, B=Moderate, C=Aggressive, D=Low Probability, E=User Call, i=Low Intensity. A and B are considered high '\n 'quality scores.\">',\n '##INFO=<ID=PCR,Number=2,Type=String,Description=\"PCR sequences used in assay.\">',\n '##INFO=<ID=AF,Number=A,Type=Float,Description=\"Minor allele frequency from population data.\">',\n '##INFO=<ID=Gene,Number=A,Type=String,Description=\"HGNC Gene Name for gene containing SNP.\">',\n '##INFO=<ID=Build,Number=A,Type=String,Description=\"Genome build used to determine SNP position for assay.\">',\n '##FILTER=<ID=LowCallRate,Description=\"SNP not called in at least 30% of samples in assay.\">',\n ]\n\n # 2. Extract info from XML file\n results = self.get_results()\n snps = self.get_snps()\n pcr_sequences = self.get_pcr_sequences()\n call_rates = self.get_snp_call_rate()\n\n # 3. For each sample, create VCF, add headers, determine genotype of each SNP and write to file.\n for sample, variants in results.items():\n\n with open(os.path.join(self.output, '%s.vcf' % sample), 'w+') as outfile:\n\n header_fields = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', str(sample)]\n\n outfile.write('%s\\n' % '\\n'.join(header_info))\n outfile.write('%s\\n' % '\\n'.join(header_parameters))\n outfile.write('#%s\\n' % '\\t'.join(header_fields))\n\n # for each variant, make a line to add to the file which will\n # then be sorted\n lines_to_write = []\n for snp, info in variants.items():\n\n ref_allele = snps[snp]['ref']\n alt_alleles = snps[snp]['alt']\n alt_list = alt_alleles.split(',')\n\n # Genotype formatting matches VCF v4.0 spec where ./. is no call.\n gt_list = []\n called_genotype = info['genotype']\n if not called_genotype:\n gt_list = ['.', '.']\n elif len(called_genotype) == 1:\n called_genotype += called_genotype\n for allele in list(called_genotype):\n if allele == ref_allele:\n gt_list.append(0)\n else:\n if allele in alt_list:\n idx = alt_list.index(allele)\n gt_list.append(idx + 1)\n else:\n raise ValueError(\n 'Called genotype %s not provided as possible alt in bed file. Sample %s and SNP '\n '%s %s.' 
% (called_genotype, sample, snp, alt_alleles)\n )\n gt = '/'.join([str(x) for x in gt_list])\n\n # Threshold currently set to 0.3 (70% results have a call).\n snp_call_rate = call_rates[snp]\n if snp_call_rate >= 0.3:\n vcf_filter = 'LowCallRate'\n else:\n vcf_filter = 'PASS'\n\n snp_pcr_seqs = pcr_sequences[snp]\n\n lines_to_write.append(\n '{chr}\\t{pos}\\t{id}\\t{ref}\\t{alt}\\t.\\t{filter}\\tAF={af};PCR={pcr};Gene={gene};Build={build}\\t'\n 'GT:MTQ\\t{gt}:{qual}\\n'.format(\n chr=snps[snp]['chrom'],\n pos=snps[snp]['pos'],\n id=snp,\n ref=ref_allele,\n alt=alt_alleles,\n filter=vcf_filter,\n af=snps[snp]['maf'],\n pcr=','.join(snp_pcr_seqs),\n gene=snps[snp]['gene'],\n build=snps[snp]['genome_build'],\n gt=gt,\n qual=','.join(info['quality'])\n )\n )\n\n sorted_lines_to_write = sorted(\n lines_to_write,\n key=lambda x: (\n # first key for sorting is the int value of chr\n int(x.split('\\t')[0][3:]),\n # second key for sorting is the position of the variant\n int(x.split('\\t')[1])\n )\n )\n\n for line in sorted_lines_to_write:\n outfile.write(line)", "def save_response_to_file(self, response, format=None, annotation=''):\n \n if format is None:\n logging.error(\"Specify a format\")\n return None\n\n # Build filename, choosing extension carefully\n url = response.url\n _name, _ext = os.path.splitext(url.split('/')[-1])\n name = remove_reserved_chars(_name)\n if format in ['html', 'pdf']:\n # HTML files might originally have no extension;\n # PDF files may have a non-PDF extension but PDFMiner requires them to have a .pdf extension\n ext = f'.{format}'\n if _ext != '':\n logging.warning(f\"Overwriting file extension from url ({_ext}) with expected extension ({ext}) for {url}\")\n else:\n if _ext == '':\n # Look up extension from dictionary. Note that Google Sheets are assumed to be exported as CSV files.\n ext = todf.get_ext(format)\n logging.warning(\"No extension in original url for {format} data: using expected extension {ext}\")\n else:\n ext = _ext.split('?')[0] # Remove query portion of URL, if any \n file_name = f\"{self.state_abbrev}{annotation}{name}{ext}\"\n\n # Save HTML and CSV as text, other formats as binary\n file_path = os.path.join(TMPDIR, file_name)\n if ext == '.html' or ext == '.csv':\n try:\n with open(file_path, 'w') as f:\n f.write(response.text)\n except UnicodeEncodeError:\n with open(file_path, \"w\", encoding=\"utf-8\") as f:\n f.write(response.text)\n except AttributeError as e:\n logging.error(f\"{e}. 
Check if the format of the content at this URL is html as expected; if not, update the code to specify the correct format (e.g., pdf).\")\n else:\n with open(file_path, 'wb') as f:\n f.write(response.body) \n\n return file_path", "def write_response_to_lib_folder(self, label: Optional[str], response: Response) -> None:\n cleaned_label = label.replace(\"/\", \"|\") if label else \"response\"\n file_name = cleaned_label + \" \" + str(datetime.now())\n file_ending = \".json\"\n if not os.path.exists(RECORD_PATH):\n os.mkdir(RECORD_PATH)\n proposed_file_name = os.path.join(RECORD_PATH, file_name + file_ending)\n # Cover files with the same name case\n while os.path.exists(proposed_file_name):\n length_of_file_type = len(file_ending)\n proposed_file_name = proposed_file_name[:-length_of_file_type] + \" (1)\" + file_ending\n with open(proposed_file_name, 'w') as f:\n f.write(response.text)\n if 'X-Trace-Id' in response.headers:\n log.info(cleaned_label + ' | X-Trace-Id: ' + response.headers['X-Trace-Id'])", "def write_data(tech_id, tech_name, sentence, source, date_crawled):\n with open('PDF_data.txt', 'a') as f:\n # text = match[\"tid\"] + '\\n' + match[\"name\"] + '\\n' + sent + '\\n' + source + '\\n' + date_crawled + '\\n\\n'\n text = tech_id + '\\n' + tech_name + '\\n' + sentence + '\\n' + source + '\\n' + date_crawled + '\\n\\n'\n f.write(text)", "def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))" ]
[ "0.602621", "0.59944147", "0.5985722", "0.5863088", "0.5780173", "0.5737445", "0.5735932", "0.5701409", "0.56273806", "0.5584363", "0.5571645", "0.55378807", "0.551075", "0.54842454", "0.5458675", "0.5398169", "0.5377229", "0.5367841", "0.5365113", "0.5354977", "0.5352039", "0.5347492", "0.5342986", "0.53339154", "0.53269506", "0.53170735", "0.531115", "0.53064704", "0.53000015", "0.5293311" ]
0.631874
0
Print differences between reference and response in console
def print_diff(ref_file, resp_file):
    # open reference
    with open(ref_file) as reference_text:
        reference = reference_text.readlines()
    # open response
    with open(resp_file) as response_text:
        response = response_text.readlines()

    # Print failed test name
    print_color("\n\n" + str(file_name) + " failed :" + "\n\n", Colors.PINK)

    symbol2color = {"+": Colors.GREEN, "-": Colors.RED}
    for line in difflib.unified_diff(reference, response):
        print_color(line, symbol2color.get(line[0], Colors.DEFAULT))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_with_ref(\n self, response, response_checker=default_checker.default_journey_checker\n ):\n\n def ref_resp2files(output_file, output_json):\n \"\"\"\n Create a file for the filtered response and for the filtered reference\n \"\"\"\n with open(output_file, \"w\") as reference_text:\n reference_text.write(output_json)\n\n def print_diff(ref_file, resp_file):\n \"\"\"\n Print differences between reference and response in console\n \"\"\"\n # open reference\n with open(ref_file) as reference_text:\n reference = reference_text.readlines()\n # open response\n with open(resp_file) as response_text:\n response = response_text.readlines()\n\n # Print failed test name\n print_color(\"\\n\\n\" + str(file_name) + \" failed :\" + \"\\n\\n\", Colors.PINK)\n\n symbol2color = {\"+\": Colors.GREEN, \"-\": Colors.RED}\n for line in difflib.unified_diff(reference, response):\n print_color(line, symbol2color.get(line[0], Colors.DEFAULT))\n\n # Filtering the answer. (We compare to a reference also filtered with the same filter)\n filtered_response = response_checker.filter(response)\n\n # Get the reference\n\n # Create the file name\n filename = self.get_file_name()\n filepath = os.path.join(config[\"REFERENCE_FILE_PATH\"], filename)\n\n assert os.path.isfile(filepath), \"{} is not a file\".format(filepath)\n\n with open(filepath, \"r\") as f:\n raw_reference = f.read()\n\n # Transform the string into a dictionary\n dict_ref = json.loads(raw_reference)\n\n # Get only the full_response part from the ref\n ref_full_response = dict_ref[\"full_response\"]\n\n # Filtering the reference\n filtered_reference = response_checker.filter(ref_full_response)\n\n # Compare response and reference\n try:\n response_checker.compare(filtered_response, filtered_reference)\n except AssertionError as e:\n # print the assertion error message\n logging.error(\"Assertion Error: %s\" % str(e))\n # find name of test\n file_name = filename.split(\"/\")[-1]\n file_name = file_name[:-5]\n\n # create a folder\n dir_path = config[\"RESPONSE_FILE_PATH\"]\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # create path to ref and resp\n full_file_name_ref = dir_path + \"/reference_\" + file_name + \".txt\"\n full_file_name_resp = dir_path + \"/response_\" + file_name + \".txt\"\n\n json_filtered_reference = json.dumps(filtered_reference, indent=4)\n json_filtered_response = json.dumps(filtered_response, indent=4)\n\n # Save resp and ref as txt files in folder named outputs\n ref_resp2files(full_file_name_ref, json_filtered_reference)\n ref_resp2files(full_file_name_resp, json_filtered_response)\n\n # Print difference in console\n print_diff(full_file_name_ref, full_file_name_resp)\n\n raise", "def print_response(response):\n print(f\"Response for {url}\")\n if response.status_code == 200:\n # Green text\n print(f\"\\033[1;32;40m {response.status_code} {response.reason}\\033[1;37;40m\")\n else:\n # Red text\n print(f\"\\033[1;31;40m {response.status_code} {response.reason}\\033[1;37;40m\")\n # print(response.json())\n print(f\" {response.elapsed.total_seconds()} seconds elapsed.\")", "def print_diff(ip, common, diff1, diff2):\n logging.info('IP: %s', ip)\n if common:\n common = [' {0}'.format(elem) for elem in common]\n logging.info('\\n'.join(common))\n if diff1:\n diff = ['+ {0}'.format(elem) for elem in diff1]\n logging.info('\\n'.join(diff))\n if diff2:\n diff = ['- {0}'.format(elem) for elem in diff2]\n logging.info('\\n'.join(diff))", "def print_response(response):\n print(response)\n print(\"-\"*30)", "def 
showref_output(self, *arguments, **kwargs):\n return self.get_output('show-ref', *arguments, **kwargs)", "def print_request_response(request_response: json):\n print(\"Printing response:\")\n print(json.dumps(request_response, indent=4))", "def test_get_request_output(self):\n pass", "def view_full_response(line):\n reqs = yield load_reqlist(line)\n for req in reqs:\n if req.response:\n if len(reqs) > 1:\n print '-'*15 + (' %s ' % req.reqid) + '-'*15\n view_full_message(req.response)\n else:\n print \"Request %s does not have a response\" % req.reqid", "def run_diagnostics(self):\n request = {\n 'jsonrpc': '2.0',\n 'id': 0,\n 'method': 'ping'\n }\n result = CurlTestBase.send_request('&diag=1', request)\n response = '<html><body><pre>'\n response += cgi.escape(result.content)\n response += '</pre></body></html>'\n self.response.out.write(response)", "def debug_html(label, response):\n\n print(\"\\n\\n\\n\", \"*********\", label, \"\\n\")\n print(response.data.decode('utf8'))\n print(\"\\n\\n\")", "def dump_request_and_response(response: Response) -> str:\n return _dump_request(response.request) + _dump_response(response)", "def get_raw_diff(self, review):\r\n return self.http_request('/r/%s/diff/raw/' % review, {})", "def view_response_bytes(line):\n reqs = yield load_reqlist(line)\n for req in reqs:\n if req.response:\n if len(reqs) > 1:\n print '-'*15 + (' %s ' % req.reqid) + '-'*15\n print req.response.full_message\n else:\n print \"Request %s does not have a response\" % req.reqid", "def printable_reponse(self):\n resp = self.response\n msg = \"-- Reponse : {} -- \\r\\n\".format(resp.status_code)\n msg += \"Headers: {} \\r\\n\".format(str(resp.headers))\n msg += \"Body: {} \\r\\n\\r\\n\".format(str(resp.content))\n return msg", "def print_response(response):\n\n lines = response.split(\"\\n\")\n for line in lines:\n print line.strip()", "def __repr__(self):\n return pprint.saferepr(self.redirects)", "def annotate_diff(desc, stdout_e, stdout_a, stdout_e_strp, stdout_a_strp):\n id_str= \"%s_\" % desc\n result[id_str + \"stdout_expected\"] = stdout_e\n result[id_str + \"stdout_actual\"] = stdout_a\n result[id_str + \"stdout_expected_stripped\"]= stdout_e_strp\n result[id_str + \"stdout_actual_stripped\"] = stdout_a_strp\n result[id_str + \"stripped_diff\"] = '\\n'.join( difflib.ndiff( stdout_e_strp.splitlines(),\n stdout_a_strp.splitlines() ))\n result.fail(\"Expected standard output from %s does not match actual output.\" % desc)", "def print_diff(diff, out):\n for interface_name, interface in diff.iteritems():\n change_color_by_tag(interface)\n out.change_color('YELLOW')\n print '[[{Interface}]]'.format(Interface=interface_name)\n for member_name, member in interface.iteritems():\n if member_name == 'ExtAttributes':\n out.reset_color()\n print 'ExtAttributes'\n print_extattribute(member)\n elif member_name == 'Consts':\n out.reset_color()\n print ' Consts'\n print_const(member)\n elif member_name == 'Attributes':\n out.reset_color()\n print ' Attributes'\n print_attribute(member)\n elif member_name == 'Operations':\n out.reset_color()\n print ' Operations'\n print_operation(member)", "def PrintDiffs(message, lhs, rhs):\n dif = set(lhs).difference(rhs)\n if dif:\n print message, ', '.join(dif)", "def print_query_response(response):\n if response.text is not None:\n print(json.loads(response.text))\n else:\n logger.warning('Response not valid.')", "def get_git_diff_stdout() -> str:\n proc = subprocess.run(\n [\"git\", \"diff\", \"origin/main\", \"HEAD\"],\n capture_output=True,\n 
check=True,\n text=True,\n )\n return proc.stdout", "def print_results(request, response, procedure_name) -> None:\n procedure_names_dict = {\n 'SquareRoot': calculator_pb2_grpc.CalculatorServicer.SquareRoot.__name__,\n 'Square': calculator_pb2_grpc.CalculatorServicer.Square.__name__,\n }\n print_string = f\"Request: {procedure_names_dict[procedure_name]} for {request.value}.\\nResponse: {response.value}.\\n\"\n print(print_string)", "def test_difference(self, client):\n\n expected = {\n 'a': [0,2,4,6,8],\n 'b': [4,6,8,10,12,14,16],\n 'result': [0, 2]\n }\n\n res = client.post('/api/v1/difference', json={'a': expected['a'], 'b': expected['b'] })\n assert res.status_code == 200\n assert res.json['data'] == expected['result']\n assert res.json['status'] == 2000", "def _print_status(self):", "def show_refs(config, args):\n for item in lib.input_json_lines():\n yield config.repo.ref(item)", "def view_response_headers(line):\n reqs = yield load_reqlist(line)\n for req in reqs:\n if req.response:\n if len(reqs) > 1:\n print '-'*15 + (' %s ' % req.reqid) + '-'*15\n view_full_message(req.response, True)\n else:\n print \"Request %s does not have a response\" % req.reqid", "def disp_resp(self, resp, content=False):\n for field in dir(resp):\n if not content and field=='content': continue\n if field.startswith('_'): continue\n try:\n print '%s = %s\\n\\n' % (field,resp.__getattribute__(field))\n except (SyntaxError,KeyError):\n pass", "def compare():\n body: t.Any = request.json\n check_error({'input': {'old': {}, 'new': {}}}, body)\n response_new = rpc_search({'input': body['input']['new']})\n response_old = rpc_search({'input': body['input']['old']})\n\n modules_new = response_new['yang-catalog:modules']['module']\n modules_old = response_old['yang-catalog:modules']['module']\n\n if len(modules_new) == 0 or len(modules_old) == 0:\n abort(404, description='No hits found either in old or new input')\n\n new_mods = []\n for mod_new in modules_new:\n new_rev = mod_new['revision']\n new_name = mod_new['name']\n found = False\n new_rev_found = False\n for mod_old in modules_old:\n old_rev = mod_old['revision']\n old_name = mod_old['name']\n if new_name == old_name and new_rev == old_rev:\n found = True\n break\n if new_name == old_name and new_rev != old_rev:\n new_rev_found = True\n if not found:\n mod_new['reason-to-show'] = 'New module'\n new_mods.append(mod_new)\n if new_rev_found:\n mod_new['reason-to-show'] = 'Different revision'\n new_mods.append(mod_new)\n if len(new_mods) == 0:\n abort(404, description='No new modules or modules with different revisions found')\n output = {'output': new_mods}\n return output", "def pytest_assertrepr_compare(op: str, left: Any, right: Any) -> List[str]: # noqa: U100\n output = [\"Compare Result:\"]\n\n for line in list(dictdiffer.diff(left, right)):\n output.extend(pp.pformat(line).split(\"\\n\"))\n\n return output", "def print_response(response):\n #fyi this is not my code, i grabbed it from github\n #forgot to copy the url though\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n\n for row in report.get('data', {}).get('rows', []):\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n print header + ': ' + dimension\n\n for i, values in enumerate(dateRangeValues):\n print 'Date 
range: ' + str(i)\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n print metricHeader.get('name') + ': ' + value" ]
[ "0.6958349", "0.6202376", "0.6095348", "0.5904432", "0.5885077", "0.5830992", "0.5810929", "0.57152325", "0.5700688", "0.5698527", "0.56759423", "0.56628805", "0.56229484", "0.55711806", "0.55609244", "0.55433357", "0.5525367", "0.549411", "0.5481868", "0.5465276", "0.5456778", "0.5436791", "0.5427032", "0.5395528", "0.537482", "0.53715324", "0.5352058", "0.52979964", "0.529103", "0.5286781" ]
0.73701763
0
Process elements from the input queue until empty.
def run(self) -> None:
    while True:
        try:
            input_element = self.input_queue.get_nowait()
            self.process(input_element)
        except Empty:
            return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_queue_fast(self):\n while self.queue:\n self.queue.popleft()()", "def _wait_empty(self):\n while True:\n if self.queue.empty():\n # We still have to wait for the last queue item being processed\n # (queue.empty() returns True before queue.task_done() is\n # called).\n self.queue.join()\n return\n time.sleep(1)", "def get_all_from_queue(Q):\n try:\n while True:\n yield Q.get_nowait()\n except queue.Empty:\n raise StopIteration", "def get_all_from_queue(Q):\n try:\n while True:\n yield Q.get_nowait()\n except Queue.Empty:\n raise StopIteration", "def drainQueue(q):\n buf = []\n while True:\n # Get as much as possible without blocking\n try:\n while True:\n item = q.get_nowait()\n if item is None:\n return buf\n else:\n buf.append(item)\n except Queue.Empty:\n pass\n\n if buf:\n return buf\n\n # Nothing in the queue. Block for\n # one item, then go back and get any\n # that we can without blocking.\n item = q.get()\n if item is None:\n return buf\n else:\n buf.append(item)", "def AdvanceQueue(self):\r\n self.data.pop(0)\r\n return", "def get_all_nowait(queue: Queue) -> list:\n\n results = []\n\n while True:\n try:\n result = queue.get_nowait()\n results.append(result)\n except Empty:\n break\n\n return results", "def drain(queue):\n while not queue.is_empty():\n queue.remove()", "def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))", "def worker(self):\n while True:\n item,index = self.inbound.get()\n if index is None:\n self.buffer.append(item)\n self.index.value = self.index.value + 1 #index of next item for buffer\n if len(self.buffer)>self.size:\n del self.buffer[0]\n self.newitem.put(None)\n else:\n self.buffer[len(self.buffer)+(index - self.index.value)] = item", "def process_queue(self):\n while self.input_processing_running:\n\n # Process everything in the queue.\n while self.input_queue.qsize() > 0:\n try:\n _telem = self.input_queue.get_nowait()\n self.process_telemetry(_telem)\n\n except Exception as e:\n self.log_error(\"Error processing telemetry dict - %s\" % str(e))\n\n # Sleep while waiting for some new data.\n time.sleep(0.5)", "def use_queue():\n q = queue.Queue()\n for i in range(10):\n q.put_nowait(i)\n while q.qsize() > 0:\n element = q.get_nowait()\n sys.stdout.write(\"poping out from queue: {0}\\n\".format(element))", "def process_queue(self, queue):\n\n while queue:\n deferred, data = queue.popleft()\n deferred.callback(data)", "def get(self):\n while self.is_running():\n try:\n inputs = self.queue.get(block=True, timeout=5).get()\n if self.is_running():\n self.queue.task_done()\n if inputs is not None:\n yield inputs\n except queue.Empty:\n pass\n except Exception as e: # pylint: disable=broad-except\n self.stop()\n raise e", "def process_queue_slowly(self):\n start = time.process_time()\n while self.queue and time.process_time() - start < 1.0 / TICKS_PER_SECOND:\n self.queue.popleft()()", "def getAllFromQueue(self, Q):\n try:\n while True:\n yield Q.get_nowait()\n except Queue.Empty:\n raise StopIteration", "def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass", "def my_consumer(q):\n while True:\n data = q.get()\n print('data found to be processed: {}'.format(data))\n processed = data * 2\n 
print(processed)\n\n if data is sentinel:\n break", "def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)", "def queue_loader(self, queue):\n for item in self.iterator():\n try:\n converted_item = self.converter(item)\n valid_item = self.validator(converted_item)\n except Exception as e:\n print(type(e), e)\n continue\n queue.put(valid_item)\n while queue.qsize() > 100:\n sleep(0.2)", "def poll(self):\n try:\n while True:\n self.handle(self.queue.get(block=False))\n except queue.Empty:\n pass\n\n [self.handle(x) for x in pygame.event.get()]", "def checkQueue( self ):\n if self.queue:\n yield self.writeToSerial( self.queue.pop( 0 ) )\n else:\n self.free = True", "def consume(iterator):\n deque(iterator, maxlen=0)", "def clear_queue(self):\n while not self.queue.empty():\n self.queue.get()", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def feed (self, inputs):\n numinputs = len(inputs)\n outputs = [None for _ in xrange(numinputs)]\n for index, item in enumerate(inputs):\n self.queues[0].put((index, item))\n\n # get results\n finished = set()\n while len(finished) < len(outputs):\n index, result = self.queues[-1].get() # blocked\n outputs[index] = result\n finished.add(index)\n return outputs", "def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())", "def dequeue(self):", "def _get_nowait(self):\n # Fulfills a waiting producer, returning its value, or raising Empty if\n # no fulfillable producers are waiting.\n def fulfill_waiting_producer():\n while True:\n if self._waiting_producers:\n produce_wish = self._waiting_producers.pop(0)\n with produce_wish.group.lock:\n if not produce_wish.group.fulfilled:\n return produce_wish.fulfill()\n else:\n raise Empty()\n\n if self._buf is not None and not self._buf.empty:\n value = self._buf.pop()\n try:\n # Cycles a producer's value onto the buffer\n produced = fulfill_waiting_producer()\n self._buf.push(produced)\n except Empty:\n pass\n return value\n else:\n return fulfill_waiting_producer()", "def schdule(self):\n while self.queue:\n if self.processing >= self.maxProcessing:\n # We have reached the maximum number of parallel\n # tasks.\n break\n\n item, completeDeferred = self.queue.pop(0)\n\n self.processing += 1 \n self.start(item).addBoth(self.done).chainDeferred(completeDeferred)" ]
[ "0.7123321", "0.6928471", "0.6863272", "0.68230313", "0.681069", "0.67634106", "0.6683323", "0.6645927", "0.66053593", "0.66042024", "0.65760016", "0.65450245", "0.65428", "0.65332437", "0.64878625", "0.6477527", "0.6431174", "0.63786554", "0.6355708", "0.6354153", "0.63382304", "0.6287964", "0.6271525", "0.6248273", "0.6200562", "0.6196528", "0.6194125", "0.61878234", "0.61864585", "0.61863744" ]
0.71853554
0
Process every input using the given worker class.
def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):
    input_queue = Queue()  # type: ignore
    output_queue = Queue()  # type: ignore
    for input_elm in inputs:
        input_queue.put(input_elm)
    threads = [worker_class(input_queue, output_queue) for _ in range(num_threads)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return get_all_nowait(output_queue)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processInputs(self):", "def process_inputs(self, inputs):", "def run(self):\n self.class_inst_obj.processor(self.msg)", "def process(self):\n raise NotImplementedError", "def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return", "def process(self):\n pass", "def process(self, input, is_processed=False):\n raise NotImplementedError", "def process(self):\n if len(self.inputs):\n self._process_input()\n while len(self.servers) > 0:\n self._process_input()\n self._write_file()", "def process(self):\n raise NotImplementedError('Method must be implemented by subclass.')", "def process(self):", "def process(self):", "def process(self):", "def process():", "def process(self, results):\n raise NotImplementedError", "def _worker(self, args):\n pass", "def worker(self, request):\n try:\n for processor in self.processors:\n if processor.accepted(request):\n processor.process(request)\n except Exception as e:\n #TODO print e\n print e\n pass\n finally:\n #waiter be awakened\n request.notify()", "def process(self):\n\n # validate processing\n if self.is_acceptable():\n # handle data and write log\n self.handle()", "def _spawn_workers(self):\n self._event.set()\n self._workers = [ClassifierWorker(self._event, self._queue, self._results) for x in range(self._NUM_WORKERS)]\n [worker.start() for worker in self._workers]", "def _map_to_workers(self, iterable, result_getter):\n if not self.is_started:\n raise RuntimeError(\"Cannot process inputs: must call start() first.\")\n\n tasks = TaskIterator(iterable)\n task = next(tasks)\n\n while True:\n try:\n self._send_task(task)\n task = next(tasks)\n except Queue.Full:\n for result in result_getter(): # I wish I had `yield from` :(\n yield result\n except StopIteration:\n break\n\n while not self.is_completed:\n for result in result_getter():\n yield result", "def run(self):\n while self.inputs:\n readable, writeable, exceptions = select(self.inputs,\n self.outputs,\n self.inputs)\n for s in readable:\n if s is self.server and self.accepting:\n self.accept(s)\n else:\n data = s.recv(1024)\n if data:\n self.parse(data.rstrip(), s)\n else:\n self.remove(s)\n\n # Writeable\n for s in writeable:\n self.send(s)\n\n # Exceptions\n for s in exceptions:\n self.remove(s)", "def process_class_list(self, module, classes):", "def __hgs_worker(classifier_type, features, labels, results_file_name,\n train_ratio, param_names, params):\n \n # Train the classifier.\n classifier = classifier_type(features, labels, training_ratio=0.7,\n **params)\n \n # Print the metrics for the classifier to the file.\n accuracy = classifier.accuracy()\n logloss = classifier.logloss()\n \n row = [params[name] for name in param_names]\n row += [accuracy, logloss]\n \n # Print results to file.\n with open(results_file_name, 'a') as file:\n csv.writer(file, lineterminator='\\n').writerow(row)\n \n # Indicate that a classifier has finished.\n print('Finished a classifier...')", "def process_queue(self):\n while self.input_processing_running:\n\n # Process everything in the queue.\n while self.input_queue.qsize() > 0:\n try:\n _telem = self.input_queue.get_nowait()\n self.process_telemetry(_telem)\n\n except Exception as e:\n self.log_error(\"Error processing telemetry dict - %s\" % str(e))\n\n # Sleep while waiting for some new data.\n time.sleep(0.5)", "def process(self, data_batch: Any, predictions: Sequence[dict]) -> None:\n self.results.extend(_to_cpu(predictions))", "def process(self, 
answers_probs):\n\n raise NotImplementedError(\"Subclass Responsibility\")", "def process(self, inputs):\n output = None\n return output", "def _process(self):\n self.kwargs[\"collect\"].process_scan_form_data(self.kwargs[\"data\"])", "def processing(self):\n pass", "def process_input(self):\n print(\"========================Start of Process_Input() Method*\")\n request_data = [\"name\", 0, 0, 0] # initialing th object variables\n req_data_counter = 0 # refers to an index in a list\n\n with open(self.__file_name) as input_file:\n whole_file = input_file.read().splitlines()\n for i in range(len(whole_file)):\n whole_file[i] = whole_file[i].split(',') # use comma as a delimiter\n for j in range(len(whole_file[i])):\n whole_file[i][j] = whole_file[i][j].strip()\n if req_data_counter < 4: # we will break the data into units\n request_data[req_data_counter] = whole_file[i][j]\n req_data_counter = req_data_counter + 1\n if req_data_counter > 3:\n # create object, having read all values for a single req\n new_request_object = Request.Request(request_data[0], request_data[1], request_data[2],\n request_data[3])\n self.input_list.append(new_request_object)\n assert isinstance(new_request_object, object) # asserting if item added is object request\n req_data_counter = 0 # resetting index counter to start reading new request data\n print(\"========================file reading finished*\")\n self.display_contents(self.input_list)\n print(\"========================End of Process_Input() Method *\")", "async def async_process_input(self, inp: inputs.Input) -> None:\n raise NotImplementedError()" ]
[ "0.62040085", "0.6178794", "0.5952422", "0.59248585", "0.5817719", "0.56970406", "0.5652122", "0.56006", "0.5596137", "0.555472", "0.555472", "0.555472", "0.55169404", "0.54495186", "0.5403452", "0.5399591", "0.5373088", "0.5360921", "0.5338759", "0.53068113", "0.53002506", "0.5289116", "0.52639675", "0.5218911", "0.51783663", "0.517185", "0.5152557", "0.5150168", "0.51367575", "0.5124231" ]
0.64647937
0
Function that allows the current user to modify their account information. It includes a feature to change the default picture assigned during registration of a new user. The picture change relies on the save_image() function located in `utils.py`, where the name of the original picture file is processed and then saved. new_project_form in the render_template() return is intentionally located here to allow rendering of the tasks.new_project_2 function
def account():
    form = UpdateAccountForm()
    new_project_form = ProjectForm()
    if form.validate_on_submit():
        if form.picture.data:  # if statement responsible for change of default picture
            picture_file = save_image(form.picture.data)
            current_user.img_file = picture_file
        current_user.user_name = form.user_name.data
        current_user.email = form.email.data
        db.session.commit()
        flash("Changes saved", "success")
        return redirect(url_for('users.account'))
    elif request.method == "GET":
        form.user_name.data = current_user.user_name
        form.email.data = current_user.email
    img_file = url_for('static', filename='images/' + current_user.img_file)
    return render_template('account.html', title="Account", form=form, img_file=img_file, new_project_form=new_project_form)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account():\n\n form = UpdateUserForm()\n\n if form.validate_on_submit():\n print(form)\n if form.picture.data:\n username = current_user.username\n pic = add_profile_pic(form.picture.data,username)\n current_user.profile_image = pic\n\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('User Account Updated')\n return redirect(url_for('users.account'))\n\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n\n profile_image = url_for('static', filename='profile_pics/' + current_user.profile_image)\n return render_template('account.html', profile_image=profile_image, form=form)", "def change_profile_img(self):\n get_photo = reddit_scrapper()\n get_photo.get_image()\n # Send image to instagram profile picture on the hidden input tag\n profile_pic_button = self.driver.find_elements_by_xpath(\n '//*[@id=\"react-root\"]/section/main/section/div[3]/div[1]/div[2]/form/input')[0].send_keys(os.getcwd() + '/daily_image/daily.jpg')\n\n time.sleep(1)\n save_profile_pic = self.driver.find_elements_by_xpath(\n '//button[contains(text(), \"Save\")]')[0].click()\n time.sleep(1)\n self.driver.get(base_url)", "def edit(request):\n if not request.user.is_authenticated():\n return redirect('/tasks/login/')\n args = {}\n args.update(csrf(request))\n images = ImageUpload.objects.all()\n args['user'] = auth.get_user(request)\n args['images'] = images\n data = Information.objects.get()\n form_information = InformationForm(instance=data)\n if request.method == 'POST' and request.FILES.get('photo') is None:\n form_post = InformationForm(request.POST, instance=data)\n if form_post.is_valid():\n form_post.save()\n return redirect('/')\n else:\n args['form'] = form_post\n elif request.method == 'POST' and request.FILES.get('photo') is not None:\n form_upload = ImageUploadForm(request.POST, request.FILES)\n if form_upload.is_valid():\n new_img = ImageUpload(photo=request.FILES['photo'])\n new_img.save()\n args['form'] = form_information\n args['success'] = 1\n else:\n args['form'] = form_information\n args['form_upload'] = form_upload\n else:\n args['form'] = form_information\n return render_to_response('tasks/edit.html', args, context_instance=RequestContext(request))", "def profile():\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n form = UserEditForm(obj=g.user)\n\n if form.validate_on_submit():\n if not User.authenticate(g.user.username, form.data[\"password\"]):\n flash(\"Invalid password.\", \"danger\")\n return render_template('/users/edit.html', form=form) \n # data = {k:v for k,v in form.data.items() if k != \"csrf_token\"}\n # data[\"image_url\"] = data[\"image_url\"] or None\n # data[\"header_image_url\"] = data[\"header_image_url\"] or None\n\n g.user.username = form.data[\"username\"]\n g.user.email = form.data[\"email\"]\n g.user.image_url = form.data[\"image_url\"] or None\n g.user.header_image_url = form.data[\"header_image_url\"] or None\n g.user.bio = form.data[\"bio\"]\n\n db.session.commit()\n\n flash(\"Profile edited!\", \"success\")\n return redirect(f'/users/{g.user.id}')\n\n return render_template('/users/edit.html', form=form)", "def profile(request, info=\"\", error_msg=\"\", messages=\"\"):\r\n try:\r\n user = _validate_and_get_geniuser(request)\r\n except LoggedInButFailedGetGeniUserError:\r\n return _show_failed_get_geniuser_page(request)\r\n\r\n email_form = forms.gen_edit_user_form(instance=user)\r\n 
affiliation_form = forms.gen_edit_user_form(instance=user)\r\n password_form = forms.EditUserPasswordForm()\r\n\r\n if request.method == 'POST':\r\n if 'affiliation' in request.POST:\r\n affiliation_form = forms.gen_edit_user_form(('affiliation',), request.POST, instance=user)\r\n if affiliation_form.is_valid():\r\n new_affiliation = affiliation_form.cleaned_data['affiliation']\r\n interface.change_user_affiliation(user, new_affiliation)\r\n info =\"Affiliation has been successfully changed to %s.\" % (user.affiliation)\r\n elif 'email' in request.POST:\r\n email_form = forms.gen_edit_user_form(('email',), request.POST, instance=user)\r\n if email_form.is_valid():\r\n new_email = email_form.cleaned_data['email']\r\n interface.change_user_email(user, new_email)\r\n info =\"Email has been successfully changed to %s.\" % (user.email)\r\n elif 'password1' in request.POST:\r\n password_form = forms.EditUserPasswordForm( request.POST, instance=user)\r\n if password_form.is_valid():\r\n new_password = password_form.cleaned_data['password1']\r\n interface.change_user_password(user, new_password)\r\n info =\"Password has been successfully changed\"\r\n\r\n username = user.username\r\n affiliation = user.affiliation\r\n email = user.email\r\n port = user.usable_vessel_port\r\n has_privkey = user.user_privkey != None\r\n #currently not used, needed if editing user port is allowed\r\n #port_range = interface.get_useable_ports()\r\n #port_range_min = port_range[0]\r\n #port_range_max = port_range[-1]\r\n\r\n return render_to_response('control/profile.html',\r\n {'email_form' : email_form,\r\n 'affiliation_form' : affiliation_form,\r\n 'password_form' : password_form,\r\n 'username' : username,\r\n 'affiliation' : affiliation,\r\n 'email' : email,\r\n 'port' : port,\r\n 'api_key' : user.api_key,\r\n 'has_privkey' : has_privkey,\r\n #'port_range_min' : port_range_min,\r\n #'port_range_max' : port_range_max,\r\n 'info' : info,\r\n 'error_msg' : error_msg,\r\n 'messages' : messages},\r\n context_instance=RequestContext(request))", "def form_valid(self, form):\n User.objects.filter(username=self.object).update(\n user_image =form.cleaned_data['user_image'],\n )\n myfile = self.request.FILES['user_image']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n messages.success(self.request, 'Image uploaded successfully')\n return super().form_valid(form)", "def post(self, request, *args, **kwargs):\n user_prof = UserProfile.objects.get(user=request.user)\n form = AboutFunderForm(request.POST, request.FILES)\n if form.is_valid():\n name = form.cleaned_data['name']\n content = form.cleaned_data.get('content')\n funder_or_adviser = form.cleaned_data.get('funder_or_adviser')\n x = form.cleaned_data.get('x')\n y = form.cleaned_data.get('y')\n w = form.cleaned_data.get('width')\n h = form.cleaned_data.get('height')\n image = form.cleaned_data.get('image')\n # ATTENTION! 
change email if you ever want to allow more users to be able to edit funders or contributors.\n if user_prof.user.email == \"[email protected]\":\n new_about_person = AboutPerson(\n name=name,\n content=content,\n image=image,\n funder_or_adviser=funder_or_adviser\n )\n new_about_person.save()\n # If user inputs image file instead of url\n if image:\n # Gets the original image to be cropped\n photo = Image.open(form.cleaned_data.get('image'))\n # Cropps the image using values x,y,w,and h from the form\n cropped_image = photo.crop((x, y, w + x, h + y))\n # Splits the file name and the extension\n filename, file_extension = os.path.splitext(\n os.path.basename(urlparse(new_about_person.image.url).path))\n cropped_image.save(settings.BASE_DIR + \"/media/about_person/image/\" + filename + file_extension)\n print(filename)\n print(file_extension)\n print(settings.BASE_DIR + \"/media/about_person/image/\" + filename + file_extension)\n new_about_person.image = \"about_person/image/\" + filename + file_extension\n print(new_about_person.image)\n new_about_person.save()\n messages.success(request, \"Funder or Advisor Was Successfully Added!\")\n return HttpResponseRedirect(reverse_lazy('contribution:about'))\n else:\n return HttpResponseRedirect(reverse_lazy(\"contribution:home\"))\n else:\n print(\"Invalid\")\n display_error(form, request)\n return render(request, 'about_create.html',\n {\n 'form': form,\n 'user_prof': user_prof,\n })", "def update_profile(name):\r\n user = User.query.filter_by(name=name).first()\r\n if not user:\r\n return abort(404)\r\n if current_user.id != user.id:\r\n return abort(403)\r\n show_passwd_form = True\r\n if user.twitter_user_id or user.google_user_id or user.facebook_user_id:\r\n show_passwd_form = False\r\n usr, apps, apps_created = cached_users.get_user_summary(name)\r\n # Extend the values\r\n current_user.rank = usr.get('rank')\r\n current_user.score = usr.get('score')\r\n # Title page\r\n title_msg = \"Update your profile: %s\" % current_user.fullname\r\n # Creation of forms\r\n update_form = UpdateProfileForm(obj=user)\r\n update_form.set_locales(current_app.config['LOCALES'])\r\n avatar_form = AvatarUploadForm()\r\n password_form = ChangePasswordForm()\r\n external_form = update_form\r\n\r\n\r\n if request.method == 'GET':\r\n return render_template('account/update.html',\r\n title=title_msg,\r\n user=usr,\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n show_passwd_form=show_passwd_form)\r\n else:\r\n # Update user avatar\r\n if request.form.get('btn') == 'Upload':\r\n avatar_form = AvatarUploadForm()\r\n if avatar_form.validate_on_submit():\r\n file = request.files['avatar']\r\n coordinates = (avatar_form.x1.data, avatar_form.y1.data,\r\n avatar_form.x2.data, avatar_form.y2.data)\r\n prefix = time.time()\r\n file.filename = \"%s_avatar.png\" % prefix\r\n container = \"user_%s\" % current_user.id\r\n uploader.upload_file(file,\r\n container=container,\r\n coordinates=coordinates)\r\n # Delete previous avatar from storage\r\n if current_user.info.get('avatar'):\r\n uploader.delete_file(current_user.info['avatar'], container)\r\n current_user.info = {'avatar': file.filename,\r\n 'container': container}\r\n db.session.commit()\r\n cached_users.delete_user_summary(current_user.name)\r\n flash(gettext('Your avatar has been updated! 
It may \\\r\n take some minutes to refresh...'), 'success')\r\n return redirect(url_for('.update_profile', name=current_user.name))\r\n else:\r\n flash(\"You have to provide an image file to update your avatar\",\r\n \"error\")\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n # Update user profile\r\n elif request.form.get('btn') == 'Profile':\r\n update_form = UpdateProfileForm()\r\n update_form.set_locales(current_app.config['LOCALES'])\r\n if update_form.validate():\r\n current_user.id = update_form.id.data\r\n current_user.fullname = update_form.fullname.data\r\n current_user.name = update_form.name.data\r\n current_user.email_addr = update_form.email_addr.data\r\n current_user.privacy_mode = update_form.privacy_mode.data\r\n current_user.locale = update_form.locale.data\r\n db.session.commit()\r\n cached_users.delete_user_summary(current_user.name)\r\n flash(gettext('Your profile has been updated!'), 'success')\r\n return redirect(url_for('.update_profile', name=current_user.name))\r\n else:\r\n flash(gettext('Please correct the errors'), 'error')\r\n title_msg = 'Update your profile: %s' % current_user.fullname\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n\r\n # Update user password\r\n elif request.form.get('btn') == 'Password':\r\n # Update the data because passing it in the constructor does not work\r\n update_form.name.data = user.name\r\n update_form.fullname.data = user.fullname\r\n update_form.email_addr.data = user.email_addr\r\n update_form.ckan_api.data = user.ckan_api\r\n external_form = update_form\r\n if password_form.validate_on_submit():\r\n user = db.session.query(model.user.User).get(current_user.id)\r\n if user.check_password(password_form.current_password.data):\r\n user.set_password(password_form.new_password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n flash(gettext('Yay, you changed your password succesfully!'),\r\n 'success')\r\n return redirect(url_for('.update_profile', name=name))\r\n else:\r\n msg = gettext(\"Your current password doesn't match the \"\r\n \"one in our records\")\r\n flash(msg, 'error')\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n else:\r\n flash(gettext('Please correct the errors'), 'error')\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n # Update user external services\r\n elif request.form.get('btn') == 'External':\r\n del external_form.locale\r\n del external_form.email_addr\r\n del external_form.fullname\r\n del external_form.name\r\n if external_form.validate():\r\n current_user.ckan_api = external_form.ckan_api.data or None\r\n db.session.commit()\r\n cached_users.delete_user_summary(current_user.name)\r\n flash(gettext('Your profile has been updated!'), 'success')\r\n return redirect(url_for('.update_profile', name=current_user.name))\r\n else:\r\n flash(gettext('Please correct the errors'), 
'error')\r\n title_msg = 'Update your profile: %s' % current_user.fullname\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n # Otherwise return 415\r\n else:\r\n return abort(415)", "def edit_user():\n if CURR_USER_KEY in session:\n user = g.user\n form = ProfileEditForm(obj=user)\n\n if form.validate_on_submit():\n user.first_name = form.first_name.data\n user.last_name = form.last_name.data\n user.description = form.description.data\n user.email = form.email.data\n user.image_url = form.image_url.data or \"/static/images/default-pic.png\"\n\n db.session.commit()\n\n flash(\"Profile edited.\")\n return redirect(\"/profile\")\n\n return render_template('/profile/edit-form.html', form=form)\n else:\n return redirect('/login')", "def edit_user_information():\n session_id = request.args.get('session-id', None)\n old_username = request.args.get('user-id', None)\n user = get_user_by_id(old_username)\n if request.method == 'POST':\n surname = request.form['surname']\n name = request.form['name']\n birthdate = request.form['birthdate']\n new_username = request.form['username']\n today = datetime.date.today()\n reservations_list = get_user_reservations_list(old_username)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n if check_authentication(session_id, old_username):\n are_changes_valid = edit_user_info(name, surname, birthdate, old_username, new_username)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)\n if are_changes_valid == \"OK\":\n edit_session(session_id, new_username)\n return render_template('user_area.html', user=new_username, session_id=session_id, edit_mode=False,\n surname=surname, name=name, birthdate=birthdate, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)\n else:\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=True,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=are_changes_valid, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)", "def makeProfile(request):\n upr = UserProfile()\n upr.user = request.user\n upr.image = \"images/no-pic.png\"\n upr.save()", "def _populate_user_and_project(self, template_dictionary, escape_db_operations=False):\n logged_user = get_logged_user()\n template_dictionary[KEY_USER] = logged_user\n show_help = logged_user is not None and logged_user.is_online_help_active()\n template_dictionary[KEY_SHOW_ONLINE_HELP] = show_help\n\n project = get_current_project()\n template_dictionary[KEY_PROJECT] = project\n if project is not None and not escape_db_operations:\n self.update_operations_count()\n return template_dictionary", "def _add_profile_image(self):\r\n self.profile_image_is_set = True\r\n file_name = filedialog.askopenfilename(initialdir=\"/\", title=self.language.refactor(\"Select GIF file\"),\r\n filetypes=((\"GIF files\", \"*.gif\"),))\r\n if file_name == '':\r\n self.new_user_window.lift()\r\n return\r\n\r\n 
self.add_profile_gif_button.destroy()\r\n gif_canvas = Ctk.CCanvas(self.new_user_window, corners='angular', size=(180, 180),\r\n bg=self.new_user_window['background'])\r\n gif_canvas.create_gif(gif_path=file_name, corner='round', size=(175, 175), pos=(90, 90),\r\n transparent=True, speed='normal')\r\n gif_canvas.place(*(15, 50))\r\n\r\n self.gif_file_path = file_name\r\n\r\n self.new_user_window.lift()", "def user_edit(user_id):\n\n if not g.user:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n current_user = User.query.get_or_404(user_id)\n received = request.form\n file = request.files.get(\"image_url\")\n form = UserEditForm(csrf_enabled=False, data=received)\n\n if form.validate_on_submit():\n if not User.authenticate(g.user.username, form.password.data):\n return _get_json_message(\n \"unable-to-update-user\",\n INVALID_CREDENTIALS_STATUS_CODE)\n\n try:\n # update non image_url fields\n current_user.email = form.email.data\n current_user.first_name = form.first_name.data,\n current_user.last_name = form.last_name.data,\n current_user.hobbies = form.hobbies.data,\n current_user.interests = form.interests.data,\n current_user.zip_code = form.zip_code.data,\n current_user.friend_radius_miles = form.friend_radius_miles.data\n\n current_user.coordinates = User.get_coords(form.zip_code.data)\n\n # update image_url with uploaded file\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n url = upload_file_obj(file, S3_BUCKET, filename)\n\n current_user.image_url = url\n\n db.session.commit()\n\n return jsonify(user=current_user.serialize())\n except ClientError as e:\n print(e)\n return _get_json_message(\n \"image-upload-failed\",\n INVALID_CREDENTIALS_STATUS_CODE)\n\n return _get_json_message(\n \"unable-to-update-user\",\n INVALID_CREDENTIALS_STATUS_CODE)", "def update_picture(self, username, picture):\n self.update(('Picture', picture), username)", "def profile_pic(self, client_file_storage):\n\n # If we already have a profile picture, remove it\n if self.profile_pic_filename:\n filepath = os.path.join(\n current_app.config['UPLOADED_IMAGES_DEST'],\n self.profile_pic_filename)\n os.remove(filepath)\n self.profile_pic_filename = None\n self.profile_pic_url = None\n\n # This uploads & saves the file on the server\n # NOTE: It uses the secure_filename function...\n server_filename = images.save(client_file_storage)\n\n # Generate the URL to this file\n url = images.url(server_filename)\n\n # Store information with the user\n self.profile_pic_filename = server_filename\n self.profile_pic_url = url", "def account():\n \n form = UpdateAccountForm()\n \n # perform actions when the form is submitted\n if form.validate_on_submit():\n # checking if the form contains a picture file\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n current_user.image_file = picture_file\n # changing the current user details with the form data\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('Your account has been updated!', 'success')\n return redirect(url_for('account'))\n # performs action if the form method is get\n elif request.method == 'GET':\n # setting the form data with the user data from the database\n form.username.data = current_user.username\n form.email.data = current_user.email\n image_file = url_for('static', filename='profile_pics/' + current_user.image_file)\n return render_template('account.html', title='Account',\n 
image_file=image_file, form=form)", "def add_profile_photo():\n pass", "def profile():\n\n form = EditUserForm(obj=g.user)\n\n if form.validate_on_submit():\n if User.authenticate(g.user.username, form.password.data):\n g.user.username = form.username.data\n g.user.email = form.email.data\n g.user.image_url = form.image_url.data\n g.user.header_image_url = form.header_image_url.data\n g.user.bio = form.bio.data\n g.user.private = form.private.data\n db.session.commit()\n return redirect(f'/users/{g.user.id}')\n flash('Incorrect password', 'danger')\n return render_template('users/edit.html', user_id=g.user.id, form=form)", "def generate_profile(request, pk=0):\n context = {}\n extra_dock = int(request.POST.get('extra_dock', 0))\n extra_firewall = int(request.POST.get('extra_firewall', 0))\n config = ConfigurationProfile.objects.filter(pk=pk).first()\n edit_mode = False\n if config is not None:\n edit_mode = True\n if request.method == 'POST':\n form = ProfileForm(request.POST, extra_dock=extra_dock, extra_firewall=extra_firewall, edit_mode=edit_mode)\n if form.is_valid() and request.POST['save'] != \"+ Add App\" and request.POST['save'] != \"Add App\":\n context['data'] = form.cleaned_data\n context['password'] = 'Nice Try!'\n context['payloads'] = get_payloads(request.POST)\n context['data']['static_apps'] = dock_app_list(context['data'])\n context['data']['firewall_apps'] = fw_app_list(context['data'])\n\n # If removal date, convert to string\n if context['data']['removal_date'] is not None:\n context['data']['removal_date'] = context['data']['removal_date'].strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n # Generate UUIDs for the payloads\n if not edit_mode:\n context['identifiers'] = generate_ids()\n else:\n profile_data = open(config.profile)\n data = json.load(profile_data)\n profile_data.close()\n context['identifiers'] = data['identifiers']\n\n # Save to file\n display_name = request.POST.get('display_name')\n filename = request.POST.get('filename')\n path = os.path.join(settings.MEDIA_ROOT, 'profiles', '{}.json'.format(filename))\n with open(path, 'w') as profile:\n profile.write(json.dumps(context))\n\n new_profile, created = ConfigurationProfile.objects.get_or_create(\n name=display_name,\n profile=os.path.join(settings.MEDIA_ROOT, 'profiles', '{}.json'.format(filename))\n )\n new_profile.scope = context['data']['scope']\n new_profile.save()\n\n # If 'Save and Redeploy' selected, configure MDM to update all previously installed copies as well\n if request.POST['save'] == 'Save and Redeploy':\n laptops = Laptop.objects.all().filter(mdm_enrolled=True, retired=False, installed__in=[new_profile])\n for laptop in laptops:\n laptop.installed.remove(new_profile)\n laptop.pending.add(new_profile)\n\n template = loader.get_template('default.html')\n return HttpResponse(template.render({\n 'title': \"Success!\",\n 'message': \"Your new configuration profile has been generated successfully! 
It is now available for \"\n \"download through the MDM.\",\n 'NO_FOOT': True,\n 'EXIT_BTN': True,\n 'EXIT_URL': reverse(\"mdm:list\")\n }, request))\n else:\n if request.POST['save'] == \"+ Add App\":\n extra_dock += 1\n elif request.POST['save'] == \"Add App\":\n extra_firewall += 1\n context['form'] = ProfileForm(request.POST, extra_dock=extra_dock, extra_firewall=extra_firewall,\n edit_mode=edit_mode)\n else:\n if edit_mode:\n profile_data = open(config.profile)\n file_data = json.load(profile_data)\n if file_data['data']['removal_date'] is not None:\n file_data['data']['removal_date'] = timezone.make_aware(\n datetime.datetime.strptime(file_data['data']['removal_date'], '%Y-%m-%dT%H:%M:%SZ'))\n profile_data.close()\n form = ProfileForm(None, initial=file_data['data'], extra_dock=file_data['data']['extra_dock'],\n extra_firewall=file_data['data']['extra_firewall'], edit_mode=True)\n else:\n identifier = str(uuid.uuid4())\n filename = \"profile-{}\".format(identifier[0:8])\n form = ProfileForm(initial={'filename': filename}, extra_dock=extra_dock, extra_firewall=extra_firewall,\n edit_mode=False)\n context['form'] = form\n\n # Ensure the automatic profile removal options are hidden if not being utilized\n context['custom_script'] = \"$(document).ready(function (){$('#id_auto_remove').change(function (){\" \\\n \"if (this.value == 'default') {$('#div_id_removal_date').hide();\" \\\n \"$('#div_id_removal_period').hide();}else{$('#div_id_removal_date').show();\" \\\n \"$('#div_id_removal_period').show();}});$('#id_auto_remove').change();});\"\n context['msg'] = \"Manage Configuration Profile\"\n return render(request, 'form_crispy.html', context)", "def userProfile(userid):\n images = get_uploaded_images()\n record = UserProfile.query.filter_by(id=userid).first()\n return render_template('userProfile.html', images=images, record =record)", "def my_team(request):\n template = loader.get_template('team/my_team.html')\n team = request.user.profile.team\n\n if team is not None:\n team_members = User.objects.filter(profile__team=team)\n\n context = {\n 'team_name': team.name,\n 'team_members': team_members,\n 'team_logo': team.logo,\n 'team_info': team.information\n }\n if request.POST.get('save'):\n if request.POST.get('new_name') != '':\n new_name = request.POST.get('new_name')\n team.name = new_name\n if 'logo_image' in request.FILES:\n team.logo = request.FILES['logo_image']\n new_info = request.POST.get('new_info')\n team.information = new_info\n team.save()\n\n context['team_name'] = team.name\n context['team_info'] = team.information\n context['team_logo'] = team.logo\n\n if request.POST.get('save_name'):\n new_name = request.POST.get('new_name')\n team.name = new_name\n team.save()\n context['team_name'] = team.name\n\n if request.POST.get('save_info'):\n new_info = request.POST.get('new_info')\n team.information = new_info\n team.save()\n context['team_info'] = team.information\n\n if request.POST.get('save_logo'):\n team.logo = request.FILES['logo_image']\n team.save()\n context['team_logo'] = team.logo\n\n if request.POST.get('leave_team'):\n request.user.profile.team = None\n request.user.profile.save()\n context = None\n return redirect('/')\n\n return CustomHttpResponse.send(template, context, request)\n\n else:\n return redirect('/team/new')", "def select_default_picture(sender, instance, **kwargs):\n if not instance.id:\n instance.picture = \"/static/user%s.png\"%(\"F\" if instance.female else \"M\")", "def account_view(request):\n \"\"\"if request.user.is_authenticated:\n form 
= None\n\n # TODO Objective 3: Create Forms and Handle POST to Update UserInfo / Password\n\n user_info = models.UserInfo.objects.get(user=request.user)\n context = { 'user_info' : user_info,\n 'form' : form }\n return render(request,'account.djhtml',context)\n request.session['failed'] = True\n return redirect('login:login_view')\n \"\"\"\n\n if request.user.is_authenticated:\n form = None\n # TODO Objective 3: Create Forms and Handle POST to Update UserInfo / Password\n existingUserInfo = models.UserInfo.objects.get(user=request.user)\n print(\"existingUserInfo:----------\",existingUserInfo.location)\n if request.method == 'POST':\n formName = request.POST.get('name')\n print(\"-------formName:\" + formName);\n\n if (formName == 'pwdForm'):\n password = request.POST['password']\n if password is not None and password != \"\":\n user = get_user(request)\n user.set_password(password)\n user.save()\n return redirect('login:login_view')\n else:\n request.user.employment = request.POST['employment']\n request.user.location = request.POST['location']\n request.user.birthday = request.POST['birthday']\n request.user.interests = request.POST['interests']\n inter = models.Interest(label=request.POST['interests'])\n inter.save()\n request.user.save()\n\n if request.POST['employment'] != '':\n existingUserInfo.employment = request.user.employment\n\n\n if request.POST['location'] != '':\n existingUserInfo.location = request.user.location\n\n if request.POST['birthday'] != \"\":\n existingUserInfo.birthday = request.user.birthday\n elif existingUserInfo.birthday==None:\n # existingUserInfo.birthday = datetime.strptime(str(existingUserInfo.birthday), '%Y-%m-%d')\n existingUserInfo.birthday = None\n\n if request.POST['interests'] != \"\" and request.POST['interests'] is not None:\n inter = models.Interest(label=request.POST['interests'])\n inter.save()\n existingUserInfo.interests.add(inter)\n\n existingUserInfo.save()\n\n\n context = {'user_info': existingUserInfo,\n 'login_form': form}\n return render(request, 'account.djhtml', context)\n request.session['failed'] = True\n return redirect('login:login_view')", "def modify_by_user():\n\n user_form = UserForm(request.form)\n\n user_form.username.data = current_user.username\n user_form.email.data = current_user.email\n user_form.password.data = current_user.password\n user_form.first_name.data = current_user.first_name\n user_form.last_name.data = current_user.last_name\n\n\n if user_form.validate_on_submit():\n\n if not request.form['username'] or request.form['username'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/modify_by_user.html', title='Modify Profile',\n user_form=user_form)\n if not request.form['email'] or request.form['email'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/modify_by_user.html', title='Modify Profile',\n user_form=user_form)\n if not request.form['password'] or request.form['password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/modify_by_user.html', title='Modify Profile',\n user_form=user_form)\n if request.form['password'] != request.form['retype_password']:\n flash(\"Passwords are not the same!\",\"warn\")\n return render_template('user/modify_by_user.html', title='Modify Profile',\n user_form=user_form)\n\n\n hashed_password = user_manager.hash_password(request.form['password'])\n\n current_user.username = request.form['username']\n current_user.email = 
request.form['email']\n current_user.password = hashed_password\n current_user.first_name = request.form['first_name']\n current_user.last_name = request.form['last_name']\n current_user.confirmed_at = datetime.datetime.utcnow()\n\n try:\n correct = True\n db.session.commit()\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error modifying user, make sure username and email are unique','error')\n return render_template('user/modify_by_user.html', title='Modify Profile',\n user_form=user_form)\n else:\n flash('The user was successfully modified.','success')\n return redirect(url_for('user_ksat.show_user'))\n\n return render_template('user/modify_by_user.html', title='Modify Profile',user_form=user_form)", "def upload_project(request):\n current_user = request.user\n current_user_name = current_user.username\n # project_ratings=Rating.objects.filter(id=project_id)\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project_post = form.save(commit=True) \n else:\n raise Http404 \n \n return redirect(view_projects)\n else: \n project_form=ProjectForm()\n \n return render(request, 'upload_project.html', {'project_form':project_form})", "def map(item):\n if item.deleted or item.profile_picture_data_url is not None:\n return\n\n user_services.generate_initial_profile_picture(item.id)", "def save(self, *args, **kwargs):\n c_d = self.cleaned_data\n if c_d.get('id') and c_d.get('avatar') and (\n isinstance(c_d.get('avatar'), UploadedFile)):\n person = get_object_or_404(Person, id=c_d.get('id'))\n try:\n old_avatar = person.avatar.file.name\n except ValueError:\n old_avatar = None\n person = super(PersonForm, self).save(*args, **kwargs)\n user = person.user\n user.username = c_d['username']\n user.first_name = c_d['first_name']\n user.last_name = c_d['last_name']\n user.email = c_d['email_address']\n pass1 = c_d.get('new_password')\n if pass1:\n user.set_password(pass1)\n user.save()\n if isinstance(c_d.get('avatar'), UploadedFile):\n os.remove(self.cleaned_data['avatar'].file.name)\n if old_avatar:\n os.remove(old_avatar)\n return person", "def edit_user_profile(request):\n user = request.user\n user_profile = UserProfile.objects.filter(user=user)[0]\n if request.method == 'POST':\n form = MemberProfileForm(request.POST)\n additional_form = MemberAdditionalProfileForm(request.POST)\n if form.is_valid() and additional_form.is_valid():\n cd = form.cleaned_data\n user.first_name = cd['first_name']\n user.last_name = cd['last_name']\n user.email = cd['email']\n user.save()\n if 'picture' in request.FILES:\n file = request.FILES['picture']\n user_profile.picture.save(file.name, file, save=True)\n user_profile.gravatar = additional_form.cleaned_data['gravatar']\n user_profile.save()\n return HttpResponseRedirect('/')\n else:\n form = MemberProfileForm(instance=request.user)\n additional_form = MemberAdditionalProfileForm(instance=user_profile)\n return render_to_response('edit_profile.html', locals())", "def set_user_profile_picture(user_id, file_name):\n\n user = User.query.get(user_id)\n \n user.profile_picture = file_name\n db.session.commit()" ]
[ "0.63029945", "0.6147011", "0.60409224", "0.5987003", "0.5972466", "0.59428054", "0.5842993", "0.5805387", "0.5803387", "0.5779269", "0.57783127", "0.5758194", "0.57569313", "0.5720132", "0.57105464", "0.57057136", "0.56755704", "0.56709665", "0.56596", "0.56305", "0.5622149", "0.5602108", "0.559116", "0.55641824", "0.55527705", "0.55467355", "0.55410516", "0.55386084", "0.5531462", "0.55249935" ]
0.6888967
0
Function that renders a form for email input; that email is the destination of the utils.send_reset_email function, which sends the user an email with a token that is valid for a specific period of time and lets them reset their password
def reset_password():
    if current_user.is_authenticated:
        return redirect(url_for('main.home'))
    form = RequestResetForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        send_reset_email(user)  # located in utils.py
        flash('An email has been sent with instruction to reset your password', 'info')
        return redirect(url_for('users.login'))
    return render_template('reset_password_request.html', form=form)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_password_request():\n form = ResetPasswordRequestForm()\n if form.validate_on_submit():\n try:\n user = User.query.filter_by(email=form.email.data).first_or_404()\n except Exception:\n flash('This Email ID is Not Registered', 'error')\n return render_template('password_reset_request.html',\n form=form), 400\n\n if user:\n send_password_reset_email(user)\n flash('Please check your email for a password reset link.',\n 'success')\n return render_template('post_pass_reset_request.html',\n title=\"Reset Password\")\n else:\n flash(\n 'Your email address must be confirmed \\\n before attempting a password reset.',\n 'error')\n return redirect(url_for('auth.login'))\n\n return render_template('password_reset_request.html', form=form), 400", "def password_reset(request):\n\n\tcontext_dict = {}\n\tif request.method == 'POST':\n\t\temail = request.POST.get('email')\n\t\tif email:\n\t\t\tuser = models.Teacher.objects.get(\n\t\t\t\tsoft_delete=False, user__email=email\n\t\t\t)\n\t\t\tif not user:\n\t\t\t\tcontext_dict[\"message\"] = \"Email ID does'nt exist, Enter Correct details\"\n\t\t\tmail = {\n\t\t\t\t'email': email,\n\t\t\t\t'domain': request.META['HTTP_HOST'],\n\t\t\t\t'site_name': 'Placement Portal',\n\t\t\t\t'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n\t\t\t\t'user': user,\n\t\t\t\t'token': ''.join([random.choice(ascii_letters+digits) for i in range (128)]),\n\t\t\t\t'protocol': 'http',\n\t\t\t}\n\t\t\ttry:\n\t\t\t\treset_token = models.PasswordReset(\n\t\t\t\t\tuser=user,\n\t\t\t\t\ttoken=mail['token'],\n\t\t\t\t\ttoken_consumed=False,\n\t\t\t\t)\n\t\t\t\treset_token.save()\n\t\t\texcept Exception as e:\n\t\t\t\tprint (e)\n\t\t\tsubject_template_name = 'password_reset_email_subject.txt'\n\t\t\temail_template_name = 'password_reset_email.html'\n\t\t\tsubject = loader.render_to_string(subject_template_name, mail)\n\t\t\tsubject = ''.join(subject.splitlines())\n\t\t\temail_data = loader.render_to_string(email_template_name, mail)\n\t\t\tsend_mail(subject, email_data, DEFAULT_FROM_EMAIL, [email], fail_silently=False)\n\t\t\tcontext_dict[\"message\"] = \"Email has been sent to your registered Email ID with instructions.\"\n\treturn render(request, \"password_reset_form.html\", context_dict)", "def send_recovery_password_email(token: str, email: str) -> None:\n\n # TODO ...\n # Load html templates and get the content from it.\n # html_content = ...\n\n # You must have to send this as a anchor\n # to my-domain.com/reset-password?token=ad5a....\n link = f\"{SERVER_HOST}/reset-password?token={token}\"\n content = f\"\"\"\n <h1>Reset your password</h1>\n <p></p>\n <a href=\"{link}\" target=\"_blank\" rel=\"noopener noreferrer\">Press here</a>\n \"\"\"\n email = sender.create_email(\n to_list=[email],\n subject=f\"Recovery Password\",\n html_content=content,\n )\n sender.send_email(email_to_send=email)", "def reset_password():\n form = ResetPassword()\n if form.validate_on_submit():\n user_email = form.email.data\n mail_exist = db.check_email(user_email)\n if mail_exist is not None:\n new_password = generate_password()\n new_password_hash = generate_password_hash(new_password)\n username = mail_exist['username']\n db.update_password_username(username, new_password_hash)\n flash('Your new password has been sent to your mailbox')\n redirect('login')\n # send_password_reset_email(user_email, new_password)\n return redirect(url_for('login'))\n else:\n flash('This email address is not registered')\n return redirect('reset_password')\n return render_template('resetpassword.html', 
form=form)", "def reset_request():\n if current_user.is_authenticated:\n return redirect('/home')\n form = RequestResetForm()\n if form.validate_on_submit():\n staff = Staff.query.filter_by(email=form.email.data).first()\n send_reset_email(staff)\n flash('An email has been sent with instructions to reset your password.', 'info')\n return redirect(url_for('login'))\n return render_template('reset_request.html', title='Reset Password',\n form=form)", "def forgot_password():\r\n form = ForgotPasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user = model.user.User.query\\\r\n .filter_by(email_addr=form.email_addr.data)\\\r\n .first()\r\n if user and user.email_addr:\r\n msg = Message(subject='Account Recovery',\r\n recipients=[user.email_addr])\r\n if user.twitter_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Twitter')\r\n elif user.facebook_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Facebook')\r\n elif user.google_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Google')\r\n else:\r\n userdict = {'user': user.name, 'password': user.passwd_hash}\r\n key = signer.signer.dumps(userdict, salt='password-reset')\r\n recovery_url = url_for('.reset_password',\r\n key=key, _external=True)\r\n msg.body = render_template(\r\n '/account/email/forgot_password.md',\r\n user=user, recovery_url=recovery_url)\r\n msg.html = markdown(msg.body)\r\n mail.send(msg)\r\n flash(gettext(\"We've send you email with account \"\r\n \"recovery instructions!\"),\r\n 'success')\r\n else:\r\n flash(gettext(\"We don't have this email in our records. \"\r\n \"You may have signed up with a different \"\r\n \"email or used Twitter, Facebook, or \"\r\n \"Google to sign-in\"), 'error')\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Something went wrong, please correct the errors on the '\r\n 'form'), 'error')\r\n return render_template('/account/password_forgot.html', form=form)", "def password_reset_token_created(sender, reset_password_token, *args, **kwargs):\n # send an e-mail to the user\n context = {\n 'current_user': reset_password_token.user,\n 'username': reset_password_token.user.username,\n 'email': reset_password_token.user.email,\n # ToDo: The URL can (and should) be constructed using pythons built-in `reverse` method.\n 'reset_password_url': \"http://some_url/reset/?token={token}\".format(token=reset_password_token.key)\n }\n\n # render email text\n email_html_message = render_to_string('email/user_reset_password.html', context)\n email_plaintext_message = render_to_string('email/user_reset_password.txt', context)\n\n msg = EmailMultiAlternatives(\n # title:\n \"Password Reset for {title}\".format(title=\"Some website title\"),\n # message:\n email_plaintext_message,\n # from:\n \"[email protected]\",\n # to:\n [reset_password_token.user.email]\n )\n msg.attach_alternative(email_html_message, \"text/html\")\n msg.send()", "def login_resetrequest():\n if request.method == \"GET\":\n # In browser request that user wants to reset the password\n return flask.render_template('reset-request.html', message=\"Please reset the password\")\n\n if request.method == \"POST\":\n # Create a token\n email = flask.request.form[\"email\"]\n\n # Find if an account with that name exists\n conn.register([model.User])\n admindb = conn[current_app.config[\"CONFIGDB\"]]\n\n userdoc = 
admindb[\"users\"].User.find_one({\"name\" : email, \"type\" : \"passwd\"})\n if userdoc == None:\n # user not found\n return flask.Response('{\"error\" : \"User not found\"}')\n\n # First reset the password\n name = userdoc[\"label\"]\n emailto = userdoc[\"name\"]\n\n # Create accout and a random tocken\n userdoc[\"token\"] = bson.ObjectId()\n userdoc[\"password_status\"] = \"reset-request\"\n\n # May only be useful for some\n if \"password_ready\" in userdoc:\n del userdoc[\"password_ready\"]\n\n userdoc.validate()\n userdoc.save()\n\n # Create email\n emailfrom = current_app.config[\"EMAIL_FROM\"] \n\n body = \"Hello \" + name + \",\\n\\n\"\n body = body + \"You recently requested a password reset for your account at https://slide-atlas.org.\"\n body = body + \"\\n To complete the request operation please follow the link below- \\n\"\n body = body + \"\\n \" + url_for('.login_confirm', _external=True) + \"?token=\" + str(userdoc[\"token\"]) + \" \\n\"\n body = body + \"\\nIf clicking on the link doesn't work, try copying and pasting it into your browser.\\n\"\n body = body + \"\\nThis link will work only once, and will let you create a new password. \\n\"\n body = body + \"\\nIf you did not request password reset, please disregard this message.\\n\"\n body = body + \"\\nThank you,\\nThe SlideAtlas Administration Team\\n\"\n\n # Create a text/plain message\n msg = MIMEText(body)\n\n # me == the sender's email address\n # you == the recipient's email address\n msg['Subject'] = 'Password reset confirmation for slide-atlas.org'\n msg['From'] = emailfrom\n msg['To'] = emailto\n print msg\n s = smtplib.SMTP(current_app.config[\"SMTP\"])\n try:\n out = s.sendmail(emailfrom, [emailto], msg.as_string())\n except:\n return flask.Response(\"{\\\"error\\\" : \\\"Error sending email\\\"}\")\n\n s.quit()\n return flask.Response(\"{\\\"success\\\" : \\\"\" + str(out) + \"\\\"}\")", "def forgotPassword():\n if request.method == 'POST':\n if emailform():\n email = request.form['email1']\n\n #Confirm the user exist\n if hl.confirmUser(email):\n user = hl.getUser(\"Email\",email)\n refLink = \"http://\"+request.headers['Host']+hl.genUrl(user[\"Name\"],\"Password\")\n #Send email\n msg = \"\"\"\n Dear {},\n\n You are receiving this email because you have requested your password be reset. 
\n Use the following link to reset your password:\n\n {}\n\n If you did not request that your password be changed, please reply to this email immediately.\n\n Regards,\n Onegroup Admin Team\n \"\"\".format(user[\"Name\"],refLink)\n\n emailMessage(\"Password Reset\", [user[\"Email\"]], msg)\n return redirect(url_for('confirm', confirmed = 'Password reset email has been sent.'))\n else:\n flash(\"User doesn't exists\")\n else:\n flash(\"Emails don't match\")\n \n return render_template('emailsend.html')", "def token_request(request):\n try:\n l_user = request.data[\"user\"] #or email\n except:\n return Response({'message':'No user information received.'}, status=status.HTTP_400_BAD_REQUEST)\n\n l_user = l_user.lower()\n\n try:\n user = User.objects.get(username=l_user)\n except:\n try:\n user = User.objects.get(email=l_user)\n except:\n return Response({'message': l_user + ' does not match any record.'}, status=status.HTTP_400_BAD_REQUEST)\n\n pin = random.randint(0, 1000000)\n try:\n subject = \"Password Reset Token.\"\n sendEmail(user, subject, \"Password Reset\", otp=pin)\n\n #Write to use record\n ResetRequests.objects.create(user = user, token = pin, use_case = 'password reset')\n \n #Add password reset request date here\n return Response({'message':'Token sent to registered email.', 'username' : user.username}, status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'message':'We could not send an email', 'error':e}, status=status.HTTP_400_BAD_REQUEST)", "def forgot_password():\n\n if not current_user.is_anonymous():\n return redirect(url_for(\"forum.index\"))\n\n form = ForgotPasswordForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n\n if user:\n token = user.make_reset_token()\n send_reset_token(user, token=token)\n\n flash((\"E-Mail sent! 
Please check your inbox.\"), \"info\")\n return redirect(url_for(\"auth.forgot_password\"))\n else:\n flash((\"You have entered an username or email that is not linked \\\n with your account\"), \"danger\")\n return render_template(\"auth/forgot_password.html\", form=form)", "def post(self):\n try:\n url = request.host_url + 'reset/password/'\n body = request.get_json()\n base_url = request.url_root\n email = body.get('email')\n\n if not email:\n raise SchemaValidationError\n\n user = User.objects.get(email=email)\n if not user:\n raise EmailDoesNotExistsError\n\n expires = datetime.timedelta(minutes=60)\n payload = {\"user_id\": str(user.id)}\n reset_token = create_access_token(payload, expires_delta=expires)\n\n return send_email('[Unboxit] Reset Your Password',\n sender='[email protected]',\n recipients=[user.email],\n text_body=render_template(\n 'components/reset_password.txt',\n url=url + reset_token),\n html_body=render_template(\n 'components/reset_password.html',\n url=url + reset_token,\n first_name=user.first_name,\n base_url=base_url))\n except SchemaValidationError:\n raise SchemaValidationError\n except DoesNotExist:\n raise EmailDoesNotExistsError\n except Exception as e:\n raise InternalServerError", "def send_password_reset_mail(email, token):\n print(\"reset password\")\n url = f\"{settings.SITE_URL}/reset-password?email={email}&token={token}\"\n SUBJECT = \"Reset Password Request\"\n # The HTML body of the email.\n body = \"\"\"\n <html>\n <head></head>\n <body>\n <p>Here is your password reset link:</p>\n <p><a href='{0}'>{1}</a></p>\n </body>\n </html>\n \"\"\".format(url, url)\n send_mail(SUBJECT, body, email)", "def reset_post():\n if g.session:\n # User is already authenticated\n return jsonify({'redirect': url_for('index.index')})\n\n form = request.values.get('form', default='email')\n token = request.values.get('token', default='')\n email = request.values.get('email', default='')\n password = request.values.get('password', default='')\n\n if form == 'password':\n try:\n user: User = db.session.query(User) \\\n .filter((User.password_token == token) & User.reset_active) \\\n .one()\n if user.is_reset_expired():\n return jsonify({'success': False, 'reason': 'expired'}), 401\n\n if len(password) < 8:\n return jsonify({'success': False, 'reason': 'password'}), 401\n\n user.set_password(password)\n db.session.commit()\n next_url = url_for('auth.reset_status', success=True)\n return jsonify({'success': True, 'redirect': next_url})\n except NoResultFound:\n return jsonify({'success': False, 'reason': 'token not found'}), 401\n else:\n try:\n user: User = db.session.query(User) \\\n .filter(User.email == email).one()\n user.reset_password()\n db.session.commit()\n\n reset_url = urllib.parse.urljoin(\n request.host_url,\n url_for('auth.reset_get', token=user.password_token))\n kwargs = {\n 'subject': gettext('Reset Password'),\n 'body': reset_url,\n 'recipients': [user.email]\n }\n mail.send_mail(**kwargs)\n next_url = url_for('auth.reset_status', sent=True)\n return jsonify({'success': True, 'redirect': next_url})\n except NoResultFound:\n return jsonify({'success': False, 'reason': 'email'}), 401", "def request_password_reset_token():\n j = request.get_json(force=True)\n user_requested = j['user'].lower()\n\n # Disabled user accounts can not request for a new password.\n target_user = User.query.filter_by(mail=user_requested).first()\n\n if target_user is None:\n return Errors.UNKNOWN_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n if target_user.state == 
StateType.DEACTIVATED:\n return Errors.DEACTIVATED_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n target_user.generate_password_request_token()\n\n send_mail(target_user.mail, render_template(\"password/reset_password_mail.txt\",\n greeting=get_opening_greeting(target_user),\n wlink=\"{}/password/reset/{}\".format(\n app.config['BUZZN_BASE_URL'],\n target_user.password_reset_token\n )), 'Passwort zurücksetzen für Buzzn-App')\n\n db.session.commit()\n return '', status.HTTP_201_CREATED", "def _request_reset(self, email):\n response = self.client.post(reverse('users.send_password_reset'),\n {'email': email})\n return response.context['token']", "def reset_token(token):\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n user = User.verify_reset_token(token)\n if user is None:\n message = \"This is an invalid or expired token\"\n return redirect(url_for(\"forgot\", message=message))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user.password = hashed_password\n db.session.commit()\n message = f'Password has been updated for {user.display_username}'\n return redirect(url_for('login', message=message))\n return render_template(\"reset_token.html\", title=\"Reset Pasword\", form=form, offer_login=True, offer_register=True)", "def forgot_password():\n if request.method == 'POST':\n if 'username' in request.form:\n username = request.form['username']\n user = Users.query.get(username)\n if user:\n reset_slug = utils.encrypt(username)\n reset_url = request.host_url + 'reset_password' + '/' + reset_slug\n from_email = ('[email protected]', 'TSG Bot')\n to_email = [(user.email, user.name)]\n subject = 'Password reset for Hades account'\n content = f\"Hello {user.name}, please click <a href=\\\"{reset_url}\\\">here</a> to reset your password!\"\n utils.send_mail(from_email, to_email, subject, content)\n return redirect(url_for('login'))\n return render_template('forgot_password.html')", "def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. {}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201", "def user_reset_password(request, token):\n\n if request.user.is_authenticated():\n return redirect(settings.AFTER_LOGIN_REDIRECT_URL)\n\n form = ResetPasswordForm(request.POST or None)\n\n if request.method == \"POST\":\n if form.is_valid():\n user_auth = get_object_or_404(PasswordResetAuth, token=token)\n user = get_object_or_404(User, email=user_auth.email)\n\n if user_auth.choose_me is True:\n new_password = form.cleaned_data[\"new_password\"]\n user.set_password(new_password)\n user.save()\n\n user_auth.choose_me = False\n user_auth.save()\n return redirect(\"/login/\")\n\n error_message = \"* Either you are not an identified user or \"\\\n \"token has been expired. 
So please click on back.\"\n return render_to_response(\"login/reset_password.html\", {\n \"form\": form,\n \"error_message\": error_message\n }, context_instance=RequestContext(request))\n\n return render_to_response(\"login/reset_password.html\", {\n \"form\": form\n }, context_instance=RequestContext(request))", "def send_password_reset_email():\n aaa.send_password_reset_email(\n username=post_get('username'),\n email_addr=post_get('email_address')\n )\n return 'Please check your mailbox.'", "def reset_password(token):\n\n if not current_user.is_anonymous():\n return redirect(url_for(\"forum.index\"))\n\n form = ResetPasswordForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n expired, invalid, data = user.verify_reset_token(form.token.data)\n\n if invalid:\n flash((\"Your password token is invalid.\"), \"danger\")\n return redirect(url_for(\"auth.forgot_password\"))\n\n if expired:\n flash((\"Your password is expired.\"), \"danger\")\n return redirect(url_for(\"auth.forgot_password\"))\n\n if user and data:\n user.password = form.password.data\n user.save()\n flash((\"Your password has been updated.\"), \"success\")\n return redirect(url_for(\"auth.login\"))\n\n form.token.data = token\n return render_template(\"auth/reset_password.html\", form=form)", "def send_reset_email(staff):\n token = staff.get_reset_token()\n msg = Message('Password Reset Request', \n sender='[email protected]', \n recipients=[staff.email])\n msg.body = f\"\"\"To reset your password, visit the following link:\n{url_for('reset_token', token=token, _external=True)}\nIf you did not make this request, then simply record this email and no changes will be made.\"\"\"\n try:\n mail.send(msg)\n except Exception as e:\n print(e)", "def forgot():\n form = ForgotForm()\n\n if form.validate_on_submit():\n db.session.add(form.pw_reset)\n db.session.commit()\n\n form.pw_reset.send()\n flash('A password reset link has been sent to your email', 'alert-success')\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n return render_template('forgot.html', form=form)", "def user_password_reset(self, request):\n reset_password_form = ResetPasswordForm(request.form)\n\n if request.method == \"POST\":\n if reset_password_form.validate_on_submit():\n if check_password_hash(current_user.password, reset_password_form.old_password.data):\n new_hashed_password = generate_password_hash(reset_password_form.password.data)\n\n temp = current_user.get_id()\n (role, email) = temp.split(\":\")\n\n # if first element is `sysadmin` instead of a scheme_id\n # call function to reset `sysadmin` pass\n if role == \"sysadmin\":\n self._scheme_handler.update_hash_password(email, new_hashed_password)\n else:\n # regular user reset\n self._student_handler.update_hash_password(current_user.scheme_id, current_user.k_number, new_hashed_password)\n\n flash(\"Password successfully updated\")\n else:\n flash(\"Old password incorrect\")\n else:\n flash(\"Please double check your new password is valid.\")\n \n return render_template(\"user/reset_password.html\", reset_password_form=reset_password_form)", "def reset_token(token):\n if current_user.is_authenticated:\n return redirect(url_for('LoadDonor'))\n staff = Staff.verify_reset_token(token)\n if staff is None:\n flash('That is an invalid or expired token', 'warning')\n return redirect(url_for('reset_request'))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n hashed_password = 
bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n staff.password = hashed_password\n db.session.commit()\n flash('Your password has been updated! You are now able to log in', 'success')\n return redirect(url_for('login'))\n return render_template('reset_token.html', title='Reset Password', form=form)", "def request_password_reset():", "def post(self):\n data = request.get_json()\n user = actions.get_user_by_email(data['email'])\n html = '<p>To reset your password </p>'\n subject = 'Request for changing password, ' + user['username']\n actions.send_email(data['email'], user['username'], user['password'], subject,\n '/reset_password/', html, False)\n pass", "def password_reset_confirm(request, uidb64, token):\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n try:\n reset_form = ResetPasswordForm(instance=user)\n # urlsafe_base64_decode() decodes to bytestring on Python 3\n except (TypeError, ValueError, OverflowError, user.DoesNotExist):\n user = None\n if user is not None and default_token_generator.check_token(user, token):\n validlink = True\n title = ('Enter new password')\n if request.method == 'POST':\n if 'password-submit' in (request.POST):\n reset_form = ResetPasswordForm(request.POST,instance=user)\n password = request.POST.get(\"password_reset\", None)\n \n if reset_form.is_valid():\n user=reset_form.save(commit = False)\n user.save()\n return redirect('password_reset_complete')\n else:\n reset_form = ResetPasswordForm(instance=user)\n else:\n validlink = False\n reset_form = ResetPasswordForm(instance=user)\n title = ('Password reset unsuccessful')\n return redirect ('invalid_password_link')\n context = {\n 'reset_form': ResetPasswordForm,\n 'title': title,\n 'validlink': validlink,\n }\n return render(request, 'reset_confirm.html', context, {'reset_form': ResetPasswordForm})", "def reset_password(token):\n if current_user.is_authenticated:\n return redirect(url_for('main.index'))\n user = User.verify_reset_password_token(token)\n if not user:\n return redirect(url_for('main.index'))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n user.set_password(form.password.data)\n user.email_confirmed = True\n db.session.commit()\n return render_template(\n 'successful_pass_reset.html', title=\"Password Reset\")\n return render_template('reset_password.html', title=\"Password Reset\",\n form=form), 417" ]
[ "0.7280214", "0.7197617", "0.7117962", "0.7043637", "0.704304", "0.6989297", "0.6984096", "0.69258046", "0.6875457", "0.6868493", "0.6868304", "0.6856506", "0.6787877", "0.6750242", "0.67212576", "0.6699849", "0.66945165", "0.66842115", "0.6680731", "0.66321975", "0.6630563", "0.66212755", "0.6620363", "0.66172636", "0.66118574", "0.6568382", "0.6567901", "0.6528503", "0.65186435", "0.6515892" ]
0.7324574
0
Returns a pandas dataframe that holds the latest record for each manual id after merging every "sheet_name" of the previously indexed_files that are present in "indexed_files_dir"
def zeta0_creation(self, indexed_files_dir, merge_columns): indexed_files = [file for file in os.listdir(indexed_files_dir) if not file.startswith("~")] indexed_files_dict = {} indexed_files_dict.clear() dateList = [] del dateList[:] for file in indexed_files: dated = file.split('_')[-1].split('.')[0] dated = dated[4:] + dated[:4] dateList.append(dated) indexed_files_dict[dated] = file dataframes = {} for dated, file in indexed_files_dict.items(): file_name = indexed_files_dir + '\\' + file dataframes[dated] = pd.read_excel(file_name, sheet_name=0) dataframes[dated]['file_date'] = dated dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in dataframes[dated]['manual_id']] merged_df = pd.concat([dataframes[dated] for dated in dateList], ignore_index=True) merged_df = merged_df.sort_values('file_date', ascending=False) zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first') pd.set_option('mode.chained_assignment', None) for col in zeta0.columns: zeta0[col] = zeta0[col].astype('str') zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == "object" else x) zeta0 = zeta0.sort_values('mid', ascending=True) if "manual_id" not in merge_columns: merge_columns.append("manual_id") zeta0 = zeta0[merge_columns] # print(zeta0) return zeta0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_walkupseq_files(latest_tsca_id):\n paths = glob.glob('walkupseq_files/*sample_info*')\n\n dfs = []\n for f in paths:\n tmp = pd.read_table(f, encoding='latin1')\n dfs.append(tmp)\n\n df = pd.concat(dfs, axis=0)\n df.to_csv('walkupseq_files/walkupseq_all_combined_%s.txt'%latest_tsca_id, sep=\"\\t\", index=None)\n return df", "def master_idx_by_date(self, exptdate, timelapse=False):\n path = self.paths_dict[exptdate]\n datadir = os.path.join(os.path.dirname(path), 'data')\n os.path.exists(datadir)\n if not timelapse:\n fns = os.listdir(datadir)\n else:\n dirs = os.listdir(datadir)\n # Create a master idx dataframe based on the files found\n # in this experiments datadir\n strains = []\n filepaths = []\n\n for fn in fns:\n print(fn)\n if fn[-4:] == '.fcs':\n match = re.search(constants.patterns.strain_name, fn)\n if match:\n strains.append(match.group())\n filepath = os.path.join(datadir, fn)\n filepaths.append(filepath)\n\n df = pd.DataFrame({'strain': strains,\n 'filepath': filepaths})\n # Add clone indices to the dataframe\n for strain in df.strain.unique():\n\n n_clones = len(df[df.strain == strain])\n df.loc[df.strain == strain, 'clone'] = [int(idx) for idx in range(1, n_clones+1, 1)]\n\n # Lookup each strain in constants.strains_dir/Strains_Database.csv\n # and add information found in the database\n strains_df = pd.read_csv(os.path.join(constants.strains_dir, 'Strains_Database.csv'))\n\n for idx in df.index:\n strain_name = df.loc[idx, 'strain']\n if strain_name in strains_df.name.values:\n for col in strains_df.columns:\n df.loc[idx, col] = strains_df.loc[strains_df.name == strain_name, col].values[0]\n \n return df", "def get_data(self, df, latest_currency):\n file_paths = list(df[\"File\"])\n df = self.extract_df(file_paths[0])\n df = self.group_df(df)\n df = self.fill_league_currency(df, latest_currency)\n for file_path in file_paths[1:]:\n league = self.extract_df(file_path)\n league_grp = self.group_df(league)\n league_grp = self.fill_league_currency(league_grp, latest_currency)\n df = df.join(league_grp)\n df = df.reset_index(drop=True)\n return df", "def get_latest_league_data(self, df):\n max_date = pd.to_datetime(df[\"Date\"]).max()\n df = df[df[\"Date\"] == max_date]\n [latest_league_file_dir] = df[\"File\"].values\n df = self.extract_df(latest_league_file_dir)\n return df", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def generate_postprocessed_files():\n get_excel_file = pd.ExcelFile('global_output.xlsx')\n get_sheet_names = get_excel_file.sheet_names\n\n writer = pd.ExcelWriter('master_ouput.xlsx', engine='xlsxwriter')\n for sheet in get_sheet_names:\n try:\n all_data = 
pd.DataFrame()\n sheetID = str(sheet)\n data = pd.read_excel('global_output.xlsx', sheet, dtype={'id': str})\n grouped_data = data.groupby(['Total Producers', 'Correct Producers Ratio', 'Collected Updates Ratio',\n 'Collected Votes Ratio', 'Collected Final Votes Ratio'], as_index=False)[\n 'Total Correct Ln(prod)',\n 'runs', 'Total Correct Ln(vote)',\n 'Runs With All Ln(prod)',\n 'Runs With All Ln(vote)',\n 'Runs With > 50% Correct', 'Runs With = Cn'].sum()\n\n grouped_data['num_correct_producers_Ln_prod'] = grouped_data['Total Correct Ln(prod)'] / grouped_data[\n 'runs']\n grouped_data['num_correct_producers_Ln_vote'] = grouped_data['Total Correct Ln(vote)'] / grouped_data[\n 'runs']\n grouped_data['percentage_for_50_%'] = (grouped_data['Runs With > 50% Correct'] / grouped_data['runs']) * 100\n grouped_data['Percentage Runs With = Cn'] = (grouped_data['Runs With = Cn'] / grouped_data['runs']) * 100\n\n all_data = all_data.append(grouped_data, ignore_index=True)\n\n all_data.to_excel(writer, sheet_name=sheet)\n except KeyError:\n continue\n writer.save()\n print(\"Merged File\")", "def auto_search_write(self, auto_search_result_df, out_csv):\n self.logger.info('Starting auto search and write')\n all_result_ids = auto_search_result_df['RESULT_ID'].unique()\n\n # validation of df structure\n required_col = ['RESULT_ID', 'SERIES_ID', 'RESULT_SERIES_SEQ_ID', 'QUERY_MOL_ID', 'RESULT_MOL_ID',\n 'RESULT_CONTEXT_ID', 'QUERY_FRAG_ID', 'QUERY_MOL_ID', 'QUERY_CONTEXT_ID', 'RESULT_FRAG_ID',\n 'QUERY_ORDER', 'RESULT_MOL_ACTIVITY']\n\n for col in required_col:\n if col not in auto_search_result_df.columns:\n raise Exception(\"Input data table does not have required columns: %s\" % col)\n\n # catch for empty table\n if auto_search_result_df.shape[0] == 0:\n print (\"No results found\")\n return False\n\n iteration = 1\n return_df = None\n\n for result_id in all_result_ids:\n\n self.logger.info(\"Result, series ID %s from table size %s: \" % (result_id, auto_search_result_df.shape[0]))\n\n sub_series_df = auto_search_result_df[auto_search_result_df['RESULT_ID'] == result_id]\n\n # get the original query mol_id_list in it's original query order\n # it can be mis-ordered due to strict_order=False param on the search method\n mol_id_list = list(zip(sub_series_df['QUERY_MOL_ID'].tolist(), sub_series_df['QUERY_ORDER'].tolist()))\n mol_id_list = sorted(mol_id_list, key=lambda xx: xx[1])\n mol_id_list = [x[0] for x in mol_id_list if x[1] > 0]\n\n self.logger.debug('Merging results to CSV frame for iteration %s and dataframe %s' %\n (iteration, str(sub_series_df.shape)))\n\n if iteration == 1:\n return_df = self.return_scored_series_dataframe(mol_id_list, sub_series_df, return_df, append=False)\n self.logger.debug('First iteration, sized at %s' % str(return_df.shape))\n iteration += 1\n else:\n # as above but append=True\n return_df = self.return_scored_series_dataframe(mol_id_list, sub_series_df, return_df, append=True)\n self.logger.debug('Merge operation, sized at %s' % str(return_df.shape))\n iteration += 1\n\n # return_df = self.enumerate_products(return_df, 'QUERY_MOL_CONTEXT', 'NEW_FRAG_R')\n\n return_df.to_csv(out_csv, index=False, float_format='%.3f') # , header=True)\n self.logger.info('Completed write of auto_search results')", "def history_clones(file, ht_df):\n if os.path.isfile(file):\n # if the file exists, we merge\n print(file + ' found, merging')\n df_file = pd.read_csv(file)\n\n ht_df['timestamp'] = pd.to_datetime(ht_df['timestamp']).dt.date\n\n df_file = pd.concat([df_file, ht_df])\n 
df_file['timestamp'] = df_file['timestamp'].astype(str)\n\n df_file.sort_values('timestamp', inplace=True)\n print(df_file.to_string())\n # we can't just drop the first instance: for the first day, we'll loose data.\n # so keep max value per date\n\n #df_file.drop_duplicates(subset=['timestamp'], keep='last', inplace=True)\n df_file = df_file.groupby('timestamp')[['uniques', 'count']].agg(['max']).reset_index()\n\n df_file.columns = df_file.columns.droplevel(level=1)\n #print(df_file.to_string())\n #print(df_file.columns)\n df_file.to_csv(file, index=False)\n\n else:\n # otherwise, just dump the df\n print('There is no file to merge, dumping df to ' + file)\n ht_df.to_csv(file, index=False)", "def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container dictionary. 
At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates \"\"\"\n 
#dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if ( k == 
'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0", "def get_newest_df(watchfolder, optional_column_names=[], existing_df=None):\n from measurement_directory import run_ids_from_txt, run_ids_from_filenames\n import os\n bc = load_breadboard_client()\n run_ids = []\n files = [filename for filename in os.listdir(watchfolder)]\n files_spe = []\n for file in files:\n if '.spe' in file:\n files_spe.append(file)\n elif 'run_ids.txt' in file:\n run_ids += run_ids_from_txt(\n os.path.abspath(os.path.join(watchfolder, file)))\n if existing_df is None:\n run_ids += run_ids_from_filenames(files_spe)\n df = bc.get_runs_df_from_ids(\n run_ids, optional_column_names=optional_column_names)\n else:\n run_ids = list(set(run_ids_from_filenames(files_spe)).union(set(run_ids)).difference(\n set(list(existing_df['run_id']))))\n if len(run_ids) > 0:\n df = existing_df.append(bc.get_runs_df_from_ids(run_ids,\n optional_column_names=optional_column_names),\n sort=False,\n ignore_index=True)\n else:\n df = existing_df\n\n def custom_sort(df):\n # takes in df and returns same df with user-interaction columns first\n #['run_id','badshot','manual_foo1','manual_foo2', 'listboundvar1', etc.]\n cols = list(df.columns)\n manual_cols = []\n for col in cols:\n if 'manual' in col:\n manual_cols += [col]\n manual_cols = sorted(manual_cols)\n user_interact_cols = ['run_id'] + ['badshot'] + manual_cols\n for col in user_interact_cols:\n cols.remove(col)\n return df[user_interact_cols + cols]\n\n df = custom_sort(df)\n df.sort_values(by='run_id', ascending=False, inplace=True)\n return df", "def combined_df(self) -> pd.DataFrame:\n return pd.concat([self.data, self.latest_data.reset_index()], ignore_index=True)", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating 
the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0", "def main():\n data_dir = \".\\\\excel\\\\data\\\\\"\n archive_dir = \".\\\\excel\\\\archive\\\\\"\n xl_list = glob.glob(data_dir + \"*.xlsx\")\n\n try:\n for xl_file in xl_list:\n workbook = pd.ExcelFile(xl_file)\n\n if fnmatch.fnmatch(xl_file.lower(), \"*base*.xlsx\") == True:\n print(f\"Creating DataFrame for '{xl_file}'...\")\n \n df_base = workbook.parse(0, skiprows=1, header=None)\n df_base.columns = [\"dept\", \n \"category\", \n \"itemDesc\", \n \"itemCode\", \n \"itemSize\", \n \"pvtLblFlag\", \n \"buyerCode\", \n \"invUnitShipped\", \n \"invCaseShipped\", \n \"storeOrdProdQty\", \n \"shortedQty\", \n \"grossSvcLvl\", \n \"netSvcLvl\"]\n df_base[\"itemCode\"] = df_base[\"itemCode\"].map('{:0>6}'.format)\n df_base[\"buyerCode\"] = df_base[\"buyerCode\"] * 10\n df_base[\"itemDesc\"] = df_base[\"itemDesc\"] + \" \" + df_base[\"itemSize\"]\n \n print(f\"'{xl_file}' Successfully processed\\n\") \n elif fnmatch.fnmatch(xl_file.lower(), \"*short*.xlsx\") == True:\n print(f\"Creating DataFrame for '{xl_file}'...\")\n \n df_shorts = workbook.parse(0, skiprows=1, header=None)\n df_shorts.columns = [\"itemDesc\", \n \"itemCode\", \n \"yesterdayOOS\"]\n df_shorts[\"itemCode\"] = df_shorts[\"itemCode\"].map('{:0>6}'.format)\n df_shorts.drop(columns=[\"itemDesc\"], inplace=True)\n \n print(f\"'{xl_file}' Successfully processed\\n\") \n elif fnmatch.fnmatch(xl_file.lower(), \"*reason*.xlsx\") == True:\n print(f\"Creating DataFrame for '{xl_file}'...\")\n \n df_reason = workbook.parse(0, skiprows=2, header=None)\n df_reason.columns = [\"dept\", \n \"category\", \n \"itemDesc\", \n \"itemCode\", \n \"outOfStock\", \n \"manufacIssue\",\n \"disc\",\n \"other\",\n \"newItemIssue\"]\n df_reason[\"itemCode\"] = df_reason[\"itemCode\"].map('{:0>6}'.format)\n df_reason[\"max\"] = df_reason[[df_reason.columns[4], \n df_reason.columns[5], \n df_reason.columns[6], \n df_reason.columns[7], \n df_reason.columns[8]]].max(axis=1)\n df_reason.loc[df_reason[\"max\"] == df_reason[\"outOfStock\"], \"primaryReason\"] = \"Out Of Stock\"\n df_reason.loc[df_reason[\"max\"] == df_reason[\"manufacIssue\"], \"primaryReason\"] = \"Manufacturer Issue\"\n df_reason.loc[df_reason[\"max\"] == df_reason[\"disc\"], \"primaryReason\"] = \"Discontinued\"\n df_reason.loc[df_reason[\"max\"] == df_reason[\"other\"], \"primaryReason\"] = \"Other\"\n df_reason.loc[df_reason[\"max\"] == df_reason[\"newItemIssue\"], \"primaryReason\"] = \"New Item Issue\"\n df_reason.sort_values(by=[\"max\"], ascending=False, inplace=True)\n df_reason.drop(columns=[\"dept\", \n \"category\", \n \"itemDesc\", \n \"outOfStock\", \n \"manufacIssue\", \n \"disc\", \n \"other\", \n \"newItemIssue\", \n \"max\"], inplace=True)\n \n print(f\"'{xl_file}' Successfully processed\\n\") \n elif fnmatch.fnmatch(xl_file.lower(), \"*export*.xlsx\") == True:\n print(f\"Creating DataFrame for '{xl_file}'...\")\n \n to_drop = [\"14:HATFIELD NORTH\", \"1:BRATTLEBORO\"]\n \n df_cs = workbook.parse(0, skiprows=3, skipfooter=20, header=None)\n df_cs = df_cs[~df_cs[7].isin(to_drop)]\n df_cs = df_cs.filter([0, 14, 15, 17, 34])\n df_cs.columns = [\"custCode\", \n \"poDueDate\", \n \"poApptDate\", \n \"inStock\", \n \"daysOOS\"]\n df_cs[\"itemCode\"] = df_cs[\"custCode\"].astype(str).str[9:15]\n 
df_cs.drop(columns=[\"custCode\"], inplace=True)\n df_cs.drop_duplicates(inplace=True)\n\n print(f\"'{xl_file}' Successfully processed\\n\")\n\n for data_file in os.listdir(data_dir):\n if fnmatch.fnmatch(data_file, \"*.xlsx\") == True:\n print(f\"Deleting '{data_file}'...\\n\")\n os.remove(data_dir + data_file)\n\n df_join_1 = df_base.merge(df_reason, how=\"left\", on=\"itemCode\")\n df_join_2 = df_join_1.merge(df_shorts, how=\"left\", on=\"itemCode\")\n df_join_3 = df_join_2.merge(df_cs, how=\"left\", on=\"itemCode\")\n \n print(\"Exporting to Excel...\\n\")\n df_join_3.to_excel(f\".\\\\excel\\\\archive\\\\oos-data-{timestamp()}.xlsx\", index=False)\n\n sys.exit(0)\n except:\n try:\n df_join_1 = df_base.merge(df_reason, how=\"left\", on=\"itemCode\")\n df_join_2 = df_join_1.merge(df_shorts, how=\"left\", on=\"itemCode\")\n\n df_join_2[\"poDueDate\"] = \"NO CS DATA\"\n df_join_2[\"poApptDate\"] = \"NO CS DATA\"\n df_join_2[\"inStock\"] = \"NO CS DATA\"\n df_join_2[\"daysOOS\"] = \"NO CS DATA\"\n \n print(\"Exporting to Excel...\\n\")\n df_join_2.to_excel(f\".\\\\excel\\\\archive\\\\oos-data-{timestamp()}.xlsx\", index=False)\n except:\n if not os.path.exists(archive_dir):\n os.makedirs(archive_dir)\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n sys.exit(1)", "def aggregate_results(output_files, agg_filename):\n\n print(file_marker + \"STARTING AGGREGATION\")\n feather_files = output_files\n\n results = []\n for i in range(len(feather_files)):\n print(file_marker + str(i))\n x = pd.read_feather(feather_files[i])\n results.append(x)\n \n overall_results = pd.concat(results, ignore_index=True, sort=False)\n opt_diff_results = overall_results\n\n opt_diff_results.reset_index(inplace=True, drop=True) \n # drop=True: column 'index' gets removed\n\n opt_diff_results.to_feather(agg_filename)\n print(file_marker + \"Aggregated results saved to: \" + agg_filename)", "def get_outdoor_data(temp_dir,site):\n if site == 'berk':\n files_od = glob(join(temp_dir,'outdoor','20*.xlsx'))\n elif site == 'bus':\n files_od = glob(join(temp_dir,'outdoor','Busara*.csv'))\n else:\n raise NameError(site)\n\n dfs = []\n for f in files_od:\n if site == 'berk':\n this_df = pd.read_excel(f,sheet_name=0,usecols='B:D',index_col=0,parse_dates=True, header=1)\n elif site == 'bus':\n this_df = pd.read_csv(f,usecols=[0,1,2],index_col=0,parse_dates=True,header=2)\n \n # drop missing values that prevented conversion to float type\n if this_df.iloc[:,0].dtype != np.float64:\n this_df = this_df[this_df.iloc[:,0] != ' ']\n this_df = this_df.astype(np.float64)\n\n # correct for weird timezones in berkeley datalogger\n this_df = correct_tz(this_df,site)\n \n this_df.columns = ['T','RH']\n this_df.index.name = 'time'\n\n # convert to celsius\n this_df['T'] = (this_df['T'] - 32) * 5/9\n dfs.append(this_df)\n \n df_od = pd.concat(dfs)\n\n # drop duplicated measurements\n df_od = df_od[~df_od.index.duplicated(keep='last')].sort_index()\n \n # separate out into daily min,mean,max\n groups = df_od.groupby(df_od.index.date)\n dfs_od = {'all':df_od,\n 'min': groups.min(),\n 'mean': groups.mean(),\n 'max': groups.max()}\n \n for i in ['min','mean','max']:\n # remove first and last day to ignore days where we did not get full recording\n dfs_od[i] = dfs_od[i].iloc[1:-1,:]\n \n # name index so that we can merge onto multiIndex'd dataframe\n dfs_od[i].index.name = 'date'\n \n return dfs_od", "def combine_excel_files(end_producer, step_producer, spec):\n glob.glob(\"excel/*.xlsx\")\n timestr = get_time()\n start_producer = 
spec['num_of_producers']\n try:\n if not os.listdir('merged-excel-docs'):\n print('Folder empty no need to remove files')\n os.mkdir('merged-excel-docs')\n except FileNotFoundError:\n os.mkdir('merged-excel-docs')\n\n writer = pd.ExcelWriter('merged-excel-docs/combined-result' + timestr + '.xlsx', engine='xlsxwriter')\n for ind_p in range(start_producer, end_producer, step_producer):\n all_data = pd.DataFrame()\n sheetID = str(ind_p)\n for f in glob.glob(\"excel/*.xlsx\"):\n df = pd.read_excel(f, \"P_\" + sheetID)\n all_data = all_data.append(df, ignore_index=True)\n all_data.to_excel(writer, sheet_name=\"P_\" + sheetID)\n writer.save()", "def extract_next_day_items(filename, ids_df, date_fields=[]):\n # An empty data frame to return\n new_items_df = pd.DataFrame()\n\n next_df = pd.DataFrame()\n try:\n if date_fields:\n next_df = pd.read_csv(filename, parse_dates=date_fields,\n converters={'FLIGHT_ID': lambda x: UUID(x)},\n memory_map=True)\n else:\n next_df = pd.read_csv(filename,\n converters={'FLIGHT_ID': lambda x: UUID(x)},\n memory_map=True)\n log.info('%s read ok', filename)\n except EnvironmentError:\n log.error('could not read file: %s', filename)\n return new_items_df # return empty DataFrame\n\n # Create a new dataframe WITHOUT any items that are in ids_df\n new_next_df = next_df[(~next_df['FLIGHT_ID'].isin(ids_df.index))]\n\n # Output the new next items\n new_next_filename = 'new_' + filename\n try:\n is_bz2 = has_bz2_extension(filename)\n if is_bz2:\n new_next_filename = new_next_filename[:-BZ2_LENGTH]\n\n new_next_df.to_csv(new_next_filename, index=False,\n date_format=ISO8601_DATETIME_FORMAT)\n log.info('written file: %s', new_next_filename)\n except EnvironmentError:\n log.error('could not write file: %s', new_next_filename)\n return new_items_df # return empty DataFrame\n\n # get the new items from the next DataFrame\n new_items_df = pd.merge(ids_df, next_df, left_index=True, right_on='FLIGHT_ID')\n replace_old_flight_ids(new_items_df)\n\n return new_items_df # return new items", "def looper(path2mdbs, tablename, csv=False):\n containing_folder = path2mdbs\n contained_files = os.listdir(containing_folder)\n df_dictionary={}\n\n count = 1\n basestring = 'file_'\n for i in contained_files:\n if os.path.splitext(os.path.join(containing_folder,i))[1]=='.mdb' or os.path.splitext(os.path.join(containing_folder,i))[1]=='.accdb':\n countup = basestring+str(count)\n # df creation/manipulation starts here\n print(i)\n df = main_translate(tablename,os.path.join(containing_folder,i))\n if df is not None:\n if 'DateLoadedInDB' in df.columns:\n df['DateLoadedInDB']=df['DateLoadedInDB'].astype('datetime64')\n df['DateLoadedInDB'] = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n else:\n df['DateLoadedInDB'] = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n\n df['DBKey'] = os.path.split(os.path.splitext(i)[0])[1].replace(\" \",\"\")\n # df add to dictionary list\n df_dictionary[countup] = df.copy()\n else:\n pass\n count+=1\n final_df = pd.concat([j for i,j in df_dictionary.items()], ignore_index=True).drop_duplicates()\n\n return final_df if csv==False else final_df.to_csv(os.path.join(containing_folder,tablename+'.csv'))", "def merge_physdfs2(files):\n\n temp_df = pd.read_csv(files[0], index_col=False)\n columns = temp_df.columns.tolist()\n merged_df = pd.DataFrame([], columns=columns)\n\n for file in files:\n df = pd.read_csv(file, index_col=False)\n\n # add 'rat_data' column to the merged df\n root_name = file.split('/')[-1]\n df = df.assign(raw_data=root_name)\n\n # add 'exp_label' 
column to the merged df\n cell_num = ''.join(re.findall(\"cell\\d{2}\", file))\n exp = file.split('_')[1]\n exp = ''.join(re.findall(\"[a-zA-Z]+\", exp))\n\n df = df.assign(exp_label=exp)\n df = df.assign(cell_num=cell_num)\n\n merged_df = pd.concat([merged_df, df], sort=True, ignore_index=True)\n\n return merged_df", "def merge_dfs(userdf, filtered_apidf):\n userdf['SOURCE']='USER'\n filtered_apidf['SOURCE']='API'\n filtered_apidf.rename(columns={'_id': 'bids_name'}, inplace=True)\n\n merged_df = pd.concat([userdf,filtered_apidf], sort=True).fillna(0)\n # merged_df['_INDEX']=merged_df.index\n\n # merged_df_with_index = pd.DataFrame(index = merged_df.index, data= merged_df)\n return merged_df", "def exptdf(self, exptdate, **kwargs):\n if 'master_index_df' in kwargs:\n master_idx = kwargs['master_index_df']\n else:\n master_idx = self.master_idx_by_date(exptdate) \n\n sampledfs = []\n # Read in data and add identifying information\n # based on master index\n print(f'Found master index with {len(master_idx)}')\n for idx in master_idx.index:\n row = master_idx.loc[idx, :]\n print(f'Looking for data at {row.filepath}')\n\n if os.path.exists(row.filepath):\n print(f'Found data')\n sampledf = FCMeasurement(ID=f'{row.strain}-{row.clone}', datafile=row.filepath).data\n print(f'Found {len(sampledf)} measurements in this file')\n # Annotate sample df\n for col in row.index:\n sampledf.loc[:, col] = row.loc[col]\n sampledfs.append(sampledf)\n else:\n print(f'No data found')\n\n if len(sampledfs) > 0:\n exptdf = pd.concat(sampledfs, ignore_index=True)\n else:\n exptdf = None\n print(f'No data found for exptdate {exptdate}')\n\n return exptdf", "def _make_current_jfiles(self):\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND segment_value_id = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if res:\n last_data_set_instance = self._db.record[0]\n last_data_set_instance_id = last_data_set_instance['report_data_set_instance_id']\n \n self._jfile.make_current_data_set(last_data_set_instance_id)\n self._jfile.make_current_saved_data_set(last_data_set_instance_id)\n\n for pivot in self._pivots:\n self._jfile.make_current_pivot_set(pivot['report_data_set_pivot_id'], last_data_set_instance_id)\n\n for chart in self._charts:\n self._jfile.make_current_chart_set(chart['report_data_set_chart_id'], last_data_set_instance_id)", "def reindex_hfd5(self):\n dfs = []\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n blob_uuid = name\n dfs.append(self.load_blob_metadata_value_df(blob_uuid))\n df = pd.concat(dfs)\n self.index.df = df\n self.index.to_hdf5(os.path.join(self.rootpath, self.INDEXFILENAME))\n return df", "def parse_directory_of_series_files(self):\n if self.series_base_dir is None or len(self.series_file_list) < 1:\n self.logger.warn('Fatal: Base Directory not set %s')\n raise Exception('Error Base Directory not set')\n\n self.logger.info('Parsing dir of files from %s' % self.series_base_dir)\n\n self.ref_series_df = pd.DataFrame([], columns=['SERIES_ID', 'SERIES_SEQ_ID', 'CONTEXT',\n 'FRAG', 'MOL_ID', 'ACTIVITY'])\n\n required_col = ['SERIES_ID', 'SERIES_SEQ_ID', 'CONTEXT', 'FRAG', 'MOL_ID', 'ACTIVITY']\n max_series_id = 0\n\n for series_file in self.series_file_list:\n\n # print series_file\n temp_df = pd.read_csv(series_file) # , index_col=False)\n # print temp_df.columns\n\n # sanity check the data table for the 
columns we need\n for col in required_col:\n if col not in temp_df.columns:\n raise Exception(\"Input CSV %s does not have required columns: %s\" % (series_file, col))\n\n # re-sequence the series ID's\n if max_series_id == 0:\n max_series_id = temp_df['SERIES_ID'].max()\n else:\n max_series_id = self.ref_series_df['SERIES_ID'].max()\n # print max_series_id\n\n temp_df['SERIES_ID'] = temp_df['SERIES_ID'] + max_series_id\n temp_df['SOURCE_FILE'] = os.path.basename(series_file)\n\n # py2>3 explicit sort=False added\n self.ref_series_df = self.ref_series_df.append(temp_df, sort=False)\n self.logger.info('Appended dataframe shape %s to master dataframe %s' %\n (str(temp_df.shape), str(self.ref_series_df.shape)))\n # print ('Appended dataframe shape %s to master dataframe %s' % (str(temp_df.shape),\n # str(self.ref_series_df.shape)))\n # print self.ref_series_df['SERIES_ID'].max()\n\n self.series_comparison_df = self.ref_series_df", "def merge_df_rows(dlist):\n\n # Create Dataframe from the dlist files\n dframe = concat(dlist, axis=0, join='outer', sort=False)\n\n # Sort the df based on the datetime index\n dframe.sort_values(by='Dates', inplace=True)\n\n # Setting Dates as the dataframe index\n dframe.set_index(['Dates'], drop=True, inplace=True)\n\n # Dropiing duplicated time points that may exist in the data\n dframe = dframe[~dframe.index.duplicated()]\n\n return dframe", "def append_score():\n score_frame = fu.read_file_to_df(working_file_url, u'企业评分')\n score_frame = score_frame.set_index(u'企业编号'.encode('utf-8'))\n\n for file_n in annual_report_indexes:\n print file_n\n\n data_frame = fu.read_file_to_df(corporation_index_file_url, file_n + '_index')\n data_frame = data_frame.set_index('Unnamed: 0')\n\n data_frame = data_frame.join(score_frame)\n\n fu.write_file(data_frame, corporation_index_file_url, file_n + '_index', index=True)\n return", "def merge(df_list):\n df_final = pd.read_csv(df_list[0])\n for ind, df in enumerate(df_list):\n if ind >= 1:\n temp_df = pd.read_csv(df_list[ind])\n temp_df = temp_df.drop(['lbl'], axis=1)\n df_final = pd.merge(df_final, temp_df, on=['author_id'])\n final_path = os.path.join(os.path.expanduser(\"~/Desktop/Age-Detection\"), \"merged-feature-collection.csv\")\n df_final.to_csv(final_path, sep=',', index=False)\n return final_path", "def refresh_final_acc_df(self, report_peak_acc=False):\n\n # build case -> group dict\n group_dict = dict()\n with open(os.path.join(self.data_dir, \"net_configs.json\"), \"r\") as json_file:\n net_configs = json.load(json_file)\n\n for g in net_configs.keys():\n cases = net_configs[g]\n case_names = cases.keys()\n \n for c in case_names:\n\n group_dict[c] = g\n\n # load current df if exists\n df_name = \"final_acc_df.csv\"\n # curr_df = pd.read_csv(os.path.join(self.df_sub_dir, df_name))\n # curr_df.drop(columns=\"Unnamed: 0\", inplace=True)\n\n acc_arr = []\n case_dict = dict()\n with open(os.path.join(self.df_sub_dir, \"case_dict.json\"), \"r\") as json_file:\n case_dict = json.load(json_file)\n\n # walk dir looking for saved net stats\n net_dir = os.path.join(self.data_dir, f\"nets\")\n for root, _, files in os.walk(net_dir):\n \n # only interested in locations files are saved\n if len(files) <= 0:\n continue\n \n slugs = root.split(\"/\")\n\n # exclude some dirs...\n if any(self.exclude_slug in slug for slug in slugs):\n continue\n\n # only latest results\n if not \"adam_lravg_nosplit\" in slugs:\n continue\n\n # consider all files...\n for filename in files:\n\n # ...as long as they are perf_stats\n if not 
\"perf_stats\" in filename:\n continue\n \n filepath = os.path.join(root, filename)\n stats_dict = np.load(filepath, allow_pickle=True).item()\n \n # extract data\n dataset = stats_dict.get(\"dataset\") if stats_dict.get(\"dataset\") is not None else \"imagenette2\"\n net_name = stats_dict.get(\"net_name\")\n train_scheme = stats_dict.get(\"train_scheme\") if stats_dict.get(\"train_scheme\") is not None else \"sgd\"\n initial_lr = stats_dict.get(\"initial_lr\") if stats_dict.get(\"initial_lr\") is not None else -1\n case = stats_dict.get(\"case\")\n sample = stats_dict.get(\"sample\")\n group = stats_dict.get(\"group\")\n if group is None:\n group = group_dict.get(case)\n modified_layers = stats_dict.get(\"modified_layers\")\n if modified_layers is not None:\n case_dict[case] = {\n \"act_fns\": modified_layers.get(\"act_fns\"),\n \"act_fn_params\": modified_layers.get(\"act_fn_params\")\n }\n\n # array containing acc/loss\n perf_stats = np.array([s for s in stats_dict.get(\"perf_stats\") if s is not None])\n if len(perf_stats) == 0:\n continue\n\n # find peak accuracy?\n try:\n\n if report_peak_acc:\n i_acc = np.argmax(perf_stats[:,0])\n else:\n i_acc = -1\n (val_acc, val_loss, train_acc, train_loss) = perf_stats[i_acc]\n\n # for learning speed\n pct_acc = (self.pct / 100.) * val_acc\n i_first = next(x for x, val in enumerate(perf_stats[:,0]) if val > pct_acc)\n \n test_acc = stats_dict.get(\"test_acc\")\n\n acc_arr.append([dataset, net_name, train_scheme, group, case, i_acc, sample, val_acc, test_acc, i_first, initial_lr])\n\n # by epoch\n n_epoch_samples = 31\n epochs = [10*i for i in range(n_epoch_samples)]\n epochs = epochs[:-1] + [int(x) for x in np.linspace(epochs[-1], len(perf_stats)-1, 5)]\n epochs = list(set(epochs))\n for epoch in epochs:\n \n try:\n (val_acc, val_loss, train_acc, train_loss) = perf_stats[epoch]\n acc_arr.append([dataset, net_name, train_scheme, group, case, epoch, sample, val_acc, None, None, initial_lr])\n except IndexError:\n break\n\n except ValueError:\n print(f\"Max entry in {case} {sample} perf_stats did not match expectations.\")\n continue\n\n # make dataframe\n acc_df = pd.DataFrame(acc_arr, columns=self.net_idx_cols+[\"val_acc\", \"test_acc\", \"epochs_past\", \"initial_lr\"])\n\n # process\n # 1. mark mixed nets\n acc_df[\"is_mixed\"] = [len(case_dict[c][\"act_fns\"]) > 1 if case_dict.get(c) is not None else False for c in acc_df[\"case\"]]\n acc_df[\"cross_fam\"] = [len(case_dict[c][\"act_fns\"]) == len(set(case_dict[c][\"act_fns\"])) if case_dict.get(c) is not None else False for c in acc_df[\"case\"]]\n\n # 2. add columns for predictions\n acc_df[\"max_pred_val_acc\"] = np.nan\n acc_df[\"linear_pred_val_acc\"] = np.nan\n acc_df[\"max_pred_val_acc_p_val\"] = np.nan\n acc_df[\"linear_pred_val_acc_p_val\"] = np.nan\n\n acc_df[\"max_pred_test_acc\"] = np.nan\n acc_df[\"linear_pred_test_acc\"] = np.nan\n acc_df[\"max_pred_test_acc_p_val\"] = np.nan\n acc_df[\"linear_pred_test_acc_p_val\"] = np.nan\n\n # index new and old without group\n # idx_no_group = list(self.net_idx_cols + [\"epoch\"])\n # idx_no_group.remove(\"group\")\n # curr_df.set_index(idx_no_group, inplace=True)\n # acc_df.set_index(idx_no_group, inplace=True)\n\n # merge new and old, preferring new\n # ndf = pd.concat([curr_df[~curr_df.index.isin(acc_df.index)], acc_df])\n\n # port over group from old df where appropriate\n # ndf[ndf.index.isin(curr_df.index)][\"group\"] = curr_df[\"group\"]\n\n # 2.9. 
index with group\n ndf = acc_df\n ndf.reset_index(drop=False, inplace=True)\n ndf.set_index(self.net_idx_cols, inplace=True)\n\n # 3. predictions for mixed cases\n mixed_df = ndf.query(\"is_mixed == True\")\n for epoch in mixed_df.index.unique(level=5):\n\n for midx in mixed_df.query(f\"epoch == {epoch}\").index.values:\n\n # break up multi-index\n d, n, sch, g, c, e, s = midx\n \n # skip if already predicted\n try:\n prediction = ndf.at[midx, \"max_pred_val_acc\"]\n if not math.isnan(prediction):\n continue\n except:\n print(f\"Prediction did not match expectations at: {midx} - {prediction}\")\n continue\n\n # get rows in this mixed case\n mixed_case_rows = ndf.loc[(d, n, sch, g, c, e)]\n \n # get component case rows\n component_cases = get_component_cases(case_dict, c)\n component_rows = ndf.query(f\"is_mixed == False\") \\\n .query(f\"dataset == '{d}'\") \\\n .query(f\"net_name == '{n}'\") \\\n .query(f\"train_scheme == '{sch}'\") \\\n .query(f\"case in {component_cases}\") \\\n .query(f\"epoch == {e}\")\n\n # flag to indicate whether row used in prediction yet\n component_rows[\"used\"] = False\n\n # make a prediction for each sample in this mixed case\n for i in range(len(mixed_case_rows)):\n mixed_case_row = mixed_case_rows.iloc[i]\n\n # choose component row accs/learning epochs\n c_accs = []\n c_accs_test = []\n # c_epochs = []\n for cc in component_cases:\n c_row = component_rows \\\n .query(f\"case == '{cc}'\") \\\n .query(f\"used == False\")\n \n if len(c_row) == 0:\n break\n c_row = c_row.sample()\n c_accs.append(c_row.val_acc.values[0])\n c_accs_test.append(c_row.test_acc.values[0])\n # c_epochs.append(c_row.epochs_past.values[0])\n\n # mark component row as used in prediction\n component_rows.at[c_row.index.values[0], \"used\"] = True\n\n if len(c_accs) == 0:\n break\n\n max_pred = np.max(c_accs)\n lin_pred = np.mean(c_accs)\n\n ndf.at[(d, n, sch, g, c, e, mixed_case_row.name), \"max_pred_val_acc\"] = max_pred\n ndf.at[(d, n, sch, g, c, e, mixed_case_row.name), \"linear_pred_val_acc\"] = lin_pred\n \n if len(c_accs_test) == 0:\n continue\n\n ndf.at[(d, n, sch, g, c, e, mixed_case_row.name), \"max_pred_test_acc\"] = np.max(c_accs_test)\n ndf.at[(d, n, sch, g, c, e, mixed_case_row.name), \"linear_pred_test_acc\"] = np.mean(c_accs_test)\n\n # significance\n upper_dists = [\"val_acc\", \"val_acc\", \"test_acc\", \"test_acc\"]\n lower_dists = [\"max_pred_val_acc\", \"linear_pred_val_acc\", \"max_pred_test_acc\", \"linear_pred_test_acc\"]\n cols = [\"max_pred_val_acc\", \"linear_pred_val_acc\", \"max_pred_test_acc\", \"linear_pred_test_acc\"]\n for upper, lower, col in zip(upper_dists, lower_dists, cols):\n\n t, p = ttest_ind(ndf.at[(d, n, sch, g, c, e), upper], ndf.at[(d, n, sch, g, c, e), lower])\n if t < 0:\n p = 1. 
- p / 2.\n else:\n p = p / 2.\n ndf.loc[(d, n, sch, g, c, e), f\"{col}_p_val\"] = p\n\n # save things\n self.save_df(df_name, ndf)\n\n # TODO: separate the refresh code for this from final_acc_df???\n self.save_json(\"case_dict.json\", case_dict)", "def read_combine_elia_activated_energy(path,status):\r\n #loop, read in and combine all data files into one \"combined_data\"\r\n i=0\r\n dfsprice = []\r\n dfsvol = []\r\n data_files_price = glob.glob(path + 'ActivatedEnergyPrices*')\r\n data_files_volume = glob.glob(path + 'ActivatedEnergyVolumes*')\r\n print(str(datetime.datetime.utcnow()) + \" amount of files to combine: \" + str(len(data_files_volume)+len(data_files_price)))\r\n \r\n for file1 in data_files_price:\r\n i=i+1\r\n print(str(datetime.datetime.utcnow()) + \" processing file number: \"+ str(i))\r\n df1 = read_elia_activated_energy_prices(file1,status)\r\n dfsprice.append(df1)\r\n combined_data_price = pd.concat(dfsprice, axis = 0)\r\n \r\n #remove \"NRV in MW\" column, because it is duplicate \r\n combined_data_price = combined_data_price.drop(combined_data_price.columns[7], axis=1)\r\n \r\n for file2 in data_files_volume:\r\n i=i+1\r\n print(str(datetime.datetime.utcnow()) + \" processing file number: \"+ str(i))\r\n df2 = read_elia_activated_energy_volumes(file2,status)\r\n dfsvol.append(df2)\r\n combined_data_vol = pd.concat(dfsvol, axis = 0)\r\n \r\n result = pd.concat([combined_data_price, combined_data_vol], axis=1)\r\n result.reset_index(inplace=True)\r\n result[\"Timestamp\"]=pd.to_datetime(result[\"Timestamp\"],format=(\"%d/%m/%Y %H:%M\"))\r\n result=result.set_index(\"Timestamp\")\r\n print(str(datetime.datetime.utcnow()) + \" finished\")\r\n return result", "def fetch(index, outfile):\n populate_index(index, outfile=outfile)" ]
[ "0.5289423", "0.52650183", "0.52591753", "0.52265924", "0.51847774", "0.5076782", "0.5037608", "0.50096446", "0.5009643", "0.5001173", "0.4953621", "0.494462", "0.493642", "0.49348438", "0.49298787", "0.48461", "0.48410356", "0.48382828", "0.4834661", "0.48010856", "0.47960046", "0.47942355", "0.47829294", "0.47728148", "0.47712734", "0.47591963", "0.47439104", "0.4741437", "0.47197947", "0.47165987" ]
0.63463265
0
Helper function for break/clear parsing may be overridden. lookupmodule() translates (possibly incomplete) file or module name into an absolute file name.
def lookupmodule(self, filename): if os.path.isabs(filename) and os.path.exists(filename): return filename f = os.path.join(sys.path[0], filename) if os.path.exists(f) and self.canonic(f) == self.mainpyfile: return f root, ext = os.path.splitext(filename) if ext == '': filename = filename + '.py' if os.path.isabs(filename): return filename for dirname in sys.path: while os.path.islink(dirname): dirname = os.readlink(dirname) fullname = os.path.join(dirname, filename) if os.path.exists(fullname): return fullname return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lookup_module(filename):\r\n\r\n # stolen from pdb\r\n import os\r\n import sys\r\n\r\n if os.path.isabs(filename) and os.path.exists(filename):\r\n return filename\r\n f = os.path.join(sys.path[0], filename)\r\n if os.path.exists(f): # and self.canonic(f) == self.mainpyfile:\r\n return f\r\n root, ext = os.path.splitext(filename)\r\n if ext == '':\r\n filename = filename + '.py'\r\n if os.path.isabs(filename):\r\n return filename\r\n for dirname in sys.path:\r\n while os.path.islink(dirname):\r\n dirname = os.readlink(dirname)\r\n fullname = os.path.join(dirname, filename)\r\n if os.path.exists(fullname):\r\n return fullname\r\n return None", "def lookup_module(filename):\r\n if filename is None:\r\n return None\r\n\r\n if os.path.isabs(filename) and os.path.exists(filename):\r\n return os.path.realpath(filename)\r\n _, ext = os.path.splitext(filename)\r\n filename_ = filename\r\n if ext == '':\r\n filename_ = filename + '.py'\r\n if os.path.isabs(filename_):\r\n return os.path.realpath(filename_)\r\n for dir_name in sys.path:\r\n if dir_name is not None:\r\n while os.path.islink(dir_name):\r\n dir_name = os.readlink(dir_name)\r\n fullname = os.path.join(dir_name, filename_)\r\n if os.path.exists(fullname):\r\n return fullname\r\n return filename_", "def findModule(name):", "def _parse_module_name(program_param):\n if program_param and program_param.endswith(\".py\"):\n return program_param[:-3]\n return program_param", "def _get_module(self, filename, base):\n if not filename or not filename.endswith('.py'):\n utils._log('Cannot get module for non python-source file: ', filename)\n return '' # only pytnon modules are supported\n base = base or os.path.join(\n self.window.extract_variables().get('project_path', ''),\n self.window.extract_variables().get('project_base_name', ''))\n utils._log('Getting module for file %s relative to base %s' % (filename, base))\n if not filename.startswith(base):\n utils._log('Cannot determine module path outside of directory')\n return ''\n return filename.replace(base, '').replace(os.path.sep, '.')[:-3].strip('.')", "def resolve_name(name):\n parts = name.split('.')\n cursor = len(parts)\n module_name, rest = parts[:cursor], parts[cursor:]\n\n while cursor > 0:\n try:\n ret = __import__('.'.join(module_name))\n break\n except ImportError:\n if cursor == 0:\n raise\n cursor -= 1\n module_name = parts[:cursor]\n rest = parts[cursor:]\n ret = ''\n\n for part in parts[1:]:\n try:\n ret = getattr(ret, part)\n except AttributeError:\n raise ImportError\n\n return ret", "def resolve_relative_name(package, module, relative):\n\n if relative.startswith('.'):\n \n # Add a dummy module onto the end if this is a package. It will be\n # pulled off in the loop below.\n if package == module:\n module += '.dummy'\n \n parts = module.split('.')\n while relative.startswith('.'):\n relative = relative[1:]\n parts.pop(-1)\n relative = '.'.join(parts) + ('.' if relative else '') + relative\n\n return relative", "def _get_module_name(filename: str) -> str:\n return \".\".join(_get_relative(filename).split(os.path.sep)[2:]).replace(\".pyi\", \"\").replace(\".__init__\", \"\")", "def resolve_address(self, offset):\n symbol = self.get_name(offset)\n module = self.get_segment_name(offset)\n\n if not module and \"_\" in symbol:\n # No module name for the segment, try to determine from the symbol name\n symbol_split = symbol.split(\"_\")\n\n # Given a symbol, i.e. 
ws2_32_WSAStartup, can we find ws2_32.dll in the list of segments?\n for segment in idautils.Segments():\n segment_name = idc.get_segm_name(segment).lower()\n\n if segment_name.startswith(symbol_split[0].lower()):\n new_name = \"\"\n for i in range(0, len(symbol_split)):\n new_name = \"{}.dll\".format(\"_\".join(names[0:i]))\n if new_name == segment_name:\n break\n\n if new_name == segment_name:\n module = new_name\n break\n\n # Still nothing?!\n if not module and \"_\" in symbol:\n symbol_split = symbol.split(\"_\")\n\n j = 1\n if symbol_split[0] == \"ws2\":\n j += 1\n module = \"{}.dll\".format(\"_\".join(symbol_split[0:j]))\n else:\n module = \"{}.dll\".format(symbol_split[0])\n\n # Strip module name from symbol name\n if module:\n module_name = module.split(\".\")[0].lower()\n\n if symbol[:len(module_name)].lower().startswith(module_name):\n symbol = symbol[len(module_name) + 1:]\n\n if not symbol:\n symbol = \"{:x}\".format(offset)\n\n self.ret = (module, symbol)\n return self.ret", "def _resolve_name(name, package, level):\r\n if not hasattr(package, 'rindex'):\r\n raise ValueError(\"'package' not set to a string\")\r\n dot = len(package)\r\n for x in xrange(level, 1, -1):\r\n try:\r\n dot = package.rindex('.', 0, dot)\r\n except ValueError:\r\n raise ValueError(\"attempted relative import beyond top-level \"\r\n \"package\")\r\n return \"%s.%s\" % (package[:dot], name)", "def _resolve_name(path, package, start):\n\n if not hasattr(package, 'rindex'):\n raise ValueError(\"'package' not set to a string\")\n dot = len(package)\n for _ in range(start, 1, -1):\n try:\n dot = package.rindex('.', 0, dot)\n except ValueError:\n raise ValueError(\"attempted relative import beyond top-level \"\n \"package\")\n return \"{}.{}\".format(package[:dot], path)", "def _resolve_name(name, package, level):\n if not hasattr(package, 'rindex'):\n raise ValueError(\"'package' not set to a string\")\n dot = len(package)\n for x in xrange(level, 1, -1):\n try:\n dot = package.rindex('.', 0, dot)\n except ValueError:\n raise ValueError(\"attempted relative import beyond top-level package\")\n return \"%s.%s\" % (package[:dot], name)", "def getMangledName(self, name, module=None):\n if module is os.path:\n return \"os.path\"\n if isinstance(name, str) and (name.startswith(self.start) or name == self.package):\n return self.prefix + name\n return name", "def resolve(fname):\n if os.path.dirname(__file__):\n return os.path.dirname(__file__) + \"/../common/\" + fname\n else:\n return \"/common/\" + fname", "def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]", "def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]", "def testFindAASpamAbs(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(self.temp_fake_aa, 'spam')\r\n aaeggs = os.path.join(self.temp_fake_aa, 'eggs.py')\r\n self.assertEqual(expected, modulefinder.get_module_filename('_fake.a.aa.spam', aaeggs))", "def resolve_import(self, item):\n name = item.name\n # The last part in `from a.b.c import d` might be a symbol rather than a\n # module, so we try a.b.c and a.b.c.d as names.\n short_name = None\n if item.is_from and not item.is_star:\n if '.' 
in name.lstrip('.'):\n # The name is something like `a.b.c`, so strip off `.c`.\n rindex = name.rfind('.')\n else:\n # The name is something like `..c`, so strip off just `c`.\n rindex = name.rfind('.') + 1\n short_name = name[:rindex]\n\n if import_finder.is_builtin(name):\n filename = name + '.so'\n return Builtin(filename, name)\n\n filename, level = convert_to_path(name)\n if level:\n # This is a relative import; we need to resolve the filename\n # relative to the importing file path.\n filename = os.path.normpath(\n os.path.join(self.current_directory, filename))\n\n if not short_name:\n try_filename = True\n try_short_filename = False\n elif item.source:\n # If the import has a source path, we can use it to eliminate\n # filenames that don't match.\n source_filename, _ = os.path.splitext(item.source)\n dirname, basename = os.path.split(source_filename)\n if basename == \"__init__\":\n source_filename = dirname\n try_filename = source_filename.endswith(filename)\n try_short_filename = not try_filename\n else:\n try_filename = try_short_filename = True\n\n files = []\n if try_filename:\n files.append((name, filename))\n if try_short_filename:\n short_filename = os.path.dirname(filename)\n files.append((short_name, short_filename))\n\n for module_name, path in files:\n for fs in self.fs_path:\n f = self._find_file(fs, path)\n if not f or f == self.current_module.path:\n # We cannot import a file from itself.\n continue\n if item.is_relative():\n package_name = self.current_module.package_name\n if package_name is None:\n # Relative import in non-package\n raise ImportException(name)\n module_name = get_absolute_name(package_name, module_name)\n if isinstance(self.current_module, System):\n return System(f, module_name)\n return Local(f, module_name, fs)\n\n # If the module isn't found in the explicit pythonpath, see if python\n # itself resolved it.\n if item.source:\n prefix, ext = os.path.splitext(item.source)\n mod_name = name\n # We need to check for importing a symbol here too.\n if short_name:\n mod = prefix.replace(os.path.sep, '.')\n mod = utils.strip_suffix(mod, '.__init__')\n if not mod.endswith(name) and mod.endswith(short_name):\n mod_name = short_name\n\n if ext == '.pyc':\n pyfile = prefix + '.py'\n if os.path.exists(pyfile):\n return System(pyfile, mod_name)\n elif not ext:\n pyfile = os.path.join(prefix, \"__init__.py\")\n if os.path.exists(pyfile):\n return System(pyfile, mod_name)\n return System(item.source, mod_name)\n\n raise ImportException(name)", "def resolve_name(name):\n parts = name.split('.')\n used = parts.pop(0)\n found = __import__(used)\n for part in parts:\n used += '.' 
+ part\n try:\n found = getattr(found, part)\n except AttributeError:\n __import__(used)\n found = getattr(found, part)\n return found", "def resolve_full_name(base, name, level):\n if level == 0:\n return name\n bits = base.rsplit(\".\", level - 1)\n base = bits[0]\n return f\"{base}.{name}\" if name else base", "def get_mod_name():\n return sys.argv[0].split(\"/\")[-1].split(\".py\")[0]", "def resolve_name(name, *additional_parts):\n additional_parts = \".\".join(additional_parts)\n\n if additional_parts:\n name = name + \".\" + additional_parts\n\n parts = name.split(\".\")\n\n if len(parts) == 1:\n # No dots in the name--just a straight up module import\n cursor = 1\n fromlist = []\n else:\n cursor = len(parts) - 1\n fromlist = [parts[-1]]\n\n module_name = parts[:cursor]\n\n while cursor > 0:\n try:\n ret = __import__(\".\".join(module_name), fromlist=fromlist)\n break\n except ImportError:\n if cursor == 0:\n raise\n cursor -= 1\n module_name = parts[:cursor]\n fromlist = [parts[cursor]]\n ret = \"\"\n\n for part in parts[cursor:]:\n try:\n ret = getattr(ret, part)\n except AttributeError:\n raise ImportError(name)\n\n return ret", "def __get_non_python_library_module_file(module_name, environment=sys.path):\n found = None\n\n # Use the longer paths first\n paths = reversed(sorted(environment))\n for path in paths:\n base_path = path.replace(\"\\\\\", \"/\")\n if stypy_parameters_copy.type_inference_file_directory_name in path:\n base_path = base_path.replace(\"/\" + stypy_parameters_copy.type_inference_file_directory_name, \"\")\n\n temp = base_path + \"/\" + module_name.replace('.', '/') + \".py\"\n if os.path.isfile(temp):\n found = temp\n # Module (__init__) names have precedence over file names\n temp = base_path + \"/\" + module_name.replace('.', '/') + \"/__init__.py\"\n if os.path.isfile(temp):\n found = temp\n break\n if found is None:\n pass\n\n return found", "def infer_module_name(filename, fspath):\n filename, _ = os.path.splitext(filename)\n for f in fspath:\n short_name = f.relative_path(filename)\n if short_name:\n # The module name for __init__.py files is the directory.\n if short_name.endswith(os.path.sep + \"__init__\"):\n short_name = short_name[:short_name.rfind(os.path.sep)]\n return short_name.replace(os.path.sep, '.')\n # We have not found filename relative to anywhere in pythonpath.\n return ''", "def _build_lookup(tree: dict, stdlib_lookup: bool = False) -> None:\n def _apply(item: dict, python_stdlib: set) -> None:\n if item[\"type\"] == \"module\" and item[\"imports\"]:\n package = item[\"fullname\"].partition(\".\")[0]\n for import_module in item[\"imports\"].values():\n import_module[\"lookup\"] = None\n name, level, relative = _get_name_level_relative_import_module(import_module)\n # So we first try to find a module with the expected name in the same directory\n # We look the parent item of the current module\n target = _look_in_package(tree, item[\"path\"], name, level=level)\n if target:\n import_module[\"lookup\"] = target\n else:\n # We now look if a package or module has the same name (within the same package)\n target = find_tree(\n tree,\n lambda x, n, p: (x[\"fullname\"] == n) and (x[\"fullname\"].partition(\".\")[0] == p),\n args=(name, package)\n )\n if target:\n import_module[\"lookup\"] = target[\"path\"]\n elif relative:\n # We haven't found so it might be a symbol imported by a package in __init__.py\n # We don't want to let an internal reference as not found\n import_module[\"lookup\"] = \"@internal\"\n elif name.partition(\".\")[0] == 
item[\"fullname\"].partition(\".\")[0]:\n # This is in case a module from within the same package has not been found\n # We don't want to let an internal reference as not found\n import_module[\"lookup\"] = \"@internal\"\n else:\n # In last resort, we look for the package in the standard library\n if name in python_stdlib:\n import_module[\"lookup\"] = \"@stdlib\"\n apply_tree(tree, _apply, args=(_build_python_stdlib(stdlib_lookup),))", "def gethandlername(URL):\n match = re.search(\"/([a-zA-Z0-9_-]+)\\.prog($|/|\\?)\", URL)\n if not match:\n # Couldn't find the requested module\n raise404(\"Couldn't find a module name in URL \" + URL)\n return match.group(1)", "def getmodulename(path):\r\n info = getmoduleinfo(path)\r\n if info: return info[0]", "def get_module_reference_name(a_module):\n return a_module.__name__.split('.')[-1]", "def import_module(self, location, name):", "def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))" ]
[ "0.6935571", "0.6808267", "0.62503785", "0.620861", "0.60548043", "0.5981422", "0.59169036", "0.58730257", "0.58359164", "0.5832401", "0.58213973", "0.5811231", "0.5761079", "0.5698767", "0.5639836", "0.5639836", "0.56236005", "0.55848193", "0.5536372", "0.5535091", "0.550816", "0.5484442", "0.54712915", "0.5442625", "0.54187393", "0.54092216", "0.5389702", "0.5383725", "0.5377162", "0.53701556" ]
0.7024272
0
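The positive document in the row above is a pdb-style lookupmodule helper. Purely as an illustrative sketch (not part of the dataset row), the standalone function below mirrors its resolution order — absolute path, sys.path[0], implicit '.py' extension, then the remaining sys.path entries — so the retrieved logic can be tried outside a debugger class. The name lookup_module_path, the dropped self.canonic/mainpyfile check, and the example lookup of 'os' are assumptions for demonstration only.

    import os
    import sys

    def lookup_module_path(filename):
        """Resolve a (possibly incomplete) file or module name to an absolute path.

        Mirrors the lookup order of the row's positive document:
        absolute path -> sys.path[0] -> add '.py' -> scan sys.path.
        """
        if os.path.isabs(filename) and os.path.exists(filename):
            return filename
        candidate = os.path.join(sys.path[0], filename)
        if os.path.exists(candidate):
            return candidate
        root, ext = os.path.splitext(filename)
        if ext == '':
            filename = filename + '.py'
        if os.path.isabs(filename):
            return filename
        for dirname in sys.path:
            while os.path.islink(dirname):
                dirname = os.readlink(dirname)
            fullname = os.path.join(dirname, filename)
            if os.path.exists(fullname):
                return fullname
        return None

    if __name__ == "__main__":
        # On most CPython installs this resolves the stdlib 'os' module to its source file.
        print(lookup_module_path("os"))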
Wait n seconds before returning ok
def timeout(n): time.sleep(int(n)) return 'ok', 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait(n=3):\n sleep(n)", "def waitUntilSuccess():", "def functionThatWillTimeOut():\n time.sleep(5)", "def wait_for(test, timeout_seconds=DEFAULT_TIMEOUT):\n start = time.time()\n while True:\n if test():\n return True\n if time.time() - start > timeout_seconds:\n return False\n time.sleep(0.5)", "def wait(delay=2):\n time.sleep(delay)", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait(wait_time=WAIT_TIME):\n # time.sleep(wait_time)\n pass", "def __wait(min_sec, max_sec):\n time.sleep(randint(min_sec, max_sec))", "def wait(self, timeoout=None, state=\"C-completed\"):", "def wait(self, seconds):\n time.sleep(seconds)", "def wait():\n time.sleep(1)", "def waitFor(self,duration=2):\n time.sleep(duration)\n print('Done waiting for ',duration)\n return", "def WaitForAction(self, action):\n start_time = time.time()\n while time.time() - start_time < 20:\n if action():\n return True\n time.sleep(1)\n\n return False", "def wait(self, ms=None):\r\n util.raiseNotDefined()", "def wait_for_completion(self, timeout=10):\n cur_status = self.runtime_status()\n while cur_status not in ['FAILED', 'KILLED', 'FINISHED']:\n time.sleep(0.2)\n timeout -= 0.2\n cur_status = self.runtime_status()\n if timeout < 0:\n break\n\n return timeout > 0", "def wait(t):\n message = \"WAIT:\" + str(t) + '\\n'\n sock.sendall(message)\n time.sleep(t)\n return", "def assert_timeout(self) -> None:", "def wait_to_succeed(name, namespace, timeout):\n return watch.wait_to_succeed(name=name, namespace=namespace,\n timeout=timeout, group=GROUP, plural=PLURAL,\n version=VERSION)", "def wait(self, timeout=600):\n s = datetime.datetime.now()\n status = json.loads(self.get())\n while status['status'] != 'COMPLETE':\n status = self.get()\n e = datetime.datetime.now()\n if (e - s).seconds > timeout:\n raise RuntimeError('timeout')\n return status", "def do_wait(self):\n pass", "def wait(self, timeout):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait_fluently(condition: Callable, timeout: TimeoutType, err_msg: str):\n if timeout is None:\n timeout = 0\n start_time = time.time()\n while True:\n res = condition()\n if res:\n return res\n if time.time() - start_time >= timeout:\n raise TimeoutException(err_msg)\n time.sleep(0.3)", "def sleep(self):\n for i in range(10):\n if cancelled: return False\n time.sleep(1)\n return True" ]
[ "0.71733505", "0.7147747", "0.68212134", "0.6718219", "0.6702025", "0.6661689", "0.6661689", "0.6661689", "0.6635981", "0.65762943", "0.65646064", "0.6553791", "0.65345186", "0.649197", "0.6484155", "0.64481616", "0.6447762", "0.6443559", "0.6374297", "0.63630664", "0.62902665", "0.6289753", "0.6286876", "0.62640446", "0.6259149", "0.6259149", "0.6259149", "0.6259149", "0.62353873", "0.62310284" ]
0.8150463
0
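The positive document in the row above sleeps and then returns the tuple ('ok', 200), which suggests a web view function; the framework is not shown in the row, so the Flask wiring below is an assumption. This sketch only illustrates how such an endpoint could be exposed and exercised; the route path '/timeout/<n>' is invented for the example.

    import time

    from flask import Flask  # assumption: the ('ok', 200) return suggests a Flask-style view

    app = Flask(__name__)

    @app.route("/timeout/<n>")  # hypothetical route; the row does not show one
    def timeout(n):
        """Wait n seconds before returning ok."""
        time.sleep(int(n))
        return 'ok', 200

    if __name__ == "__main__":
        app.run()  # GET /timeout/3 blocks for ~3 s, then answers 200 with body 'ok'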
int ploidy return all possible genotypes, completely determined by ploidy
def all_genotype(ploidy): return ["".join(comb) for comb in cwr("ACGT-", ploidy)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)", "def collapse_genotypes(pL,gL):\n if len(gL) < 2:\n return gL\n else:\n uniqueL = [] # list of unique genotypes relative to ploidy\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL", "def get_all_possible_genotypes(self):\n # Get all genotypes.\n return mutations_to_genotypes(self.mutations, wildtype=self.wildtype)", "def genotypes(self):\n return self.data.genotypes.values", "def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes", "def _build_genotypes(self):\n x = np.zeros(self.n)\n \n # Frequencies derived from HWE.\n num_hetero = 2 * self.maf * (1 - self.maf) * self.n\n num_homo_minor = self.maf ** 2 * self.n\n \n x[:num_hetero] = 1\n x[num_hetero:num_hetero+num_homo_minor] = 2\n np.random.shuffle(x)\n \n # Add noise for dosage values if needed.\n if self.dosage_var:\n x[x == 0] += np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 0]))\n )\n x[x == 1] += np.random.normal(0, self.dosage_var, len(x[x == 1]))\n x[x == 2] -= np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 2]))\n )\n\n # Mask some values if the call rate is not 1.\n if self.call_rate < 1:\n missing_rate = 1 - self.call_rate\n missing_number = missing_rate * self.n\n missing_idx = np.arange(0, self.n)\n np.random.shuffle(missing_idx)\n missing_idx = missing_idx[:missing_number]\n x[missing_idx] = np.nan\n \n return x", "def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene", "def phenotypes(self):\n return self.data.phenotypes.values", "def calculate_genotype_probabilities(self):\n for name, member in self.members.items():\n member.genotype_probabilities = self.genotype_probabilities_of(name)", "def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), 
set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))", "def _process_genotypes(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'genotype'))\n logger.info(\"building labels for genotypes\")\n geno = Genotype(g)\n fly_tax = 'NCBITaxon:7227'\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (genotype_num, uniquename, description, name) = line\n\n # if self.testMode is True:\n # if int(object_key) not in self.test_keys.get('genotype'):\n # continue\n\n # add the internal genotype to pub mapping\n genotype_id = 'MONARCH:FBgeno'+str(genotype_num)\n self.idhash['genotype'][genotype_num] = genotype_id\n\n if description == '':\n description = None\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if self.testMode and \\\n int(genotype_num) not in \\\n self.test_keys['genotype']:\n continue\n\n model.addIndividualToGraph(\n genotype_id, uniquename,\n Genotype.genoparts['intrinsic_genotype'],\n description)\n # we know all genotypes are in flies\n # FIXME we assume here they are in melanogaster,\n # but that isn't necessarily true!!!\n # TODO should the taxon be == genomic background?\n geno.addTaxon(fly_tax, genotype_id)\n genotype_iid = self._makeInternalIdentifier(\n 'genotype', genotype_num)\n model.addComment(\n genotype_id, genotype_iid)\n if name.strip() != '':\n model.addSynonym(genotype_id, name)\n\n return", "def get_gene_biotypes(db_path, table=Annotation):\n session = start_session(db_path)\n query = session.query(table.GeneBiotype).distinct()\n return {x[0] for x in query.all()}", "def simulate_genotype_calls(\n n_variant: int, n_sample: int, p: Tuple[float, float, float], seed: int = 0\n) -> DataArray:\n rs = np.random.RandomState(seed)\n # Draw genotype codes with provided distribution\n gt = np.stack(\n [\n rs.choice([0, 1, 2], size=n_sample, replace=True, p=p)\n for i in range(n_variant)\n ]\n )\n # Expand 3rd dimension with calls matching genotypes\n gt = np.stack([np.where(gt == 0, 0, 1), np.where(gt == 2, 1, 0)], axis=-1)\n return xr.DataArray(gt, dims=(\"variants\", \"samples\", \"ploidy\"))", "def get_missing_genotypes(self):\n return utils.get_missing_genotypes(\n self.genotypes,\n mutations=self.mutations\n )", "def possible_motifs_by_length(length, base_set=\"ACGU\"):\n args = [base_set for i in xrange(length)]\n for permutation in itertools.product(*args):\n yield \"\".join(permutation)", "def phenotypes(self):\n\t\treturn Phenotype.PhenotypesByPatient(self.id, self.host)", "def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]", "def simple_genotype_matrix(n, p):\n genotypes = np.zeros(shape=(n, p))\n for item in range(0, p):\n genotypes[:, item] = np.random.binomial(1, np.random.uniform(0.1, 0.5, 1), n)\n\n return genotypes", "def reproduce(population:list):\n new_gen = []\n probs = []\n for p in population:\n probs.append(p[3])\n while len(new_gen) != len(probs):\n parents = selection(probs)\n son,eval_son,daughter,eval_daughter = xo(population[parents[0]][0],population[parents[0]][1], population[parents[1]][0],population[parents[1]][1],2)\n new_gen.append([son,eval_son])\n new_gen.append([daughter,eval_daughter])\n # mutation\n # lets say 5% of the population gets mutated\n how_many_to_mutate = int(NUM_OF_CHROMOZOMS * (1/100))\n t = [i for i in range(NUM_OF_CHROMOZOMS)]\n # 
choose percent of the population randomly, uniformly\n indices_to_mutate = choice(t, how_many_to_mutate, replace=False)\n for i in range(len(indices_to_mutate)):\n mutate(new_gen[indices_to_mutate[i]])\n\n evaluateAll(new_gen)\n return new_gen", "def variations():", "def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")", "def _get_genotypes(self, samples, records, switch):\n\n variant = np.zeros(len(samples))\n for idx, sample in enumerate(samples):\n try:\n gt = records.genotype(sample)['GT']\n except IndexError:\n print(\"something went wrong with:\")\n print('sample:', sample, 'variant:', records, '-- set value to missing')\n gt = '.'\n if gt == '.':\n gt = 0\n else:\n gt = re.split('\\||/', gt)\n gt = list(map(int, gt))\n variant[idx] = np.sum(gt)\n if switch:\n variant = np.abs(variant - 2)\n return variant", "def calc_genotype(self, arch_param):\n\n def _parse(weights, genos):\n gene = []\n n = 2\n start = 0\n for i in range(self.steps):\n end = start + n\n W = weights[start:end].copy()\n G = genos[start:end].copy()\n edges = sorted(range(i + 2),\n key=lambda x: -max(W[x][k] for k in range(len(W[x])) if G[x][k] != 'none'))[:2]\n for j in edges:\n k_best = None\n for k in range(len(W[j])):\n if G[j][k] != 'none':\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append([G[j][k_best], i + 2, j])\n start = end\n n += 1\n return gene\n\n normal_param = np.array(self.darts_cfg.super_network.normal.genotype)\n reduce_param = np.array(self.darts_cfg.super_network.reduce.genotype)\n geno_normal = _parse(arch_param[0], normal_param[:, 0])\n geno_reduce = _parse(arch_param[1], reduce_param[:, 0])\n return [geno_normal, geno_reduce]", "def _genotype_updated(self):\n if self.data.get(\"GT\", None) is None:\n self.gt_alleles = None\n self.called = None\n self.ploidy = None\n else:\n self.gt_alleles = []\n for allele in ALLELE_DELIM.split(str(self.data[\"GT\"])):\n if allele == \".\":\n self.gt_alleles.append(None)\n else:\n self.gt_alleles.append(int(allele))\n self.called = all([al is not None for al in self.gt_alleles])\n self.ploidy = len(self.gt_alleles)", "def genotype(self):\n\t\tgenotype = \"\"\n\t\tfields = vars(self)\n\t\tfor name, field in fields.items():\n\t\t\tif isinstance(field, Pattern):\n\t\t\t\tgenotype += field.genotype()\n\t\t\telse:\n\t\t\t\tgenotype += str(field)\n\t\t\tgenotype += \"\\0\"\n\n\t\treturn genotype", "def make_oligos(protein_seq_files, wt_dna_fasta, amino_acid_range, primer_file, restriction_enzyme):\n wt_sequence, wt_protein_dict = parse_wt_sequences(wt_dna_fasta)\n protein_variants_objs = parse_protein_sequences(protein_seq_files)\n\n for variant in protein_variants_objs:\n dna_variant = convert_to_dna(variant, wt_protein_dict)\n\n oligo_seq_obj, dna_fragment_seq_obj, protein_fragment = add_flanking_nucleotides(\n dna_variant, variant, wt_sequence, primer_file, amino_acid_range, restriction_enzyme\n )\n checked_oligo = run_checks(oligo_seq_obj, dna_fragment_seq_obj, protein_fragment, restriction_enzyme)\n sys.stdout.write(\">%s\\n\" % 
dna_variant.name)\n sys.stdout.write(\"%s\\n\" % checked_oligo)", "def GoAnnot(prots, gos, onlyProts=False):\r\n with resources.open_text(\"autoprot.data\",\"Homo_sapiens.gene_info\") as d:\r\n geneInfo = pd.read_csv(d, sep='\\t')\r\n with resources.open_text(\"autoprot.data\",\"gene2go_alt\") as d:\r\n gene2go = pd.read_csv(d, sep='\\t')\r\n prots = pd.DataFrame(pd.Series([str(i).upper().split(';')[0] for i in prots]), columns=[\"Gene names\"])\r\n prots = prots.merge(geneInfo[[\"Symbol\", \"GeneID\"]], left_on=\"Gene names\", right_on=\"Symbol\", how='inner')\r\n \r\n prots = prots.merge(gene2go[[\"GeneID\", \"GO_ID\", \"GO_term\"]], on=\"GeneID\", how='inner')\r\n if onlyProts == True:\r\n for idx, go in enumerate(gos):\r\n if idx == 0:\r\n redProts = prots[\"Symbol\"][prots[\"GO_term\"].str.contains(go)]\r\n else:\r\n redProts = redProts.append(prots[\"Symbol\"][prots[\"GO_term\"].str.contains(go)])\r\n return redProts.drop_duplicates()\r\n else: \r\n for idx, go in enumerate(gos):\r\n if idx == 0:\r\n redProts = prots[prots[\"GO_term\"]==go]\r\n else:\r\n redProts = redProts.append(prots[prots[\"GO_term\"]==go])\r\n return redProts.drop_duplicates()", "def perm_vs_hyp():\n\n return [\"P\",\"P\",\"P\",\"P\",\"P\"]", "def n(self):\n return len(self.genotypes)", "def _get_prochirality(self):\n for atom in self.invarioms:\n atom.get_prochirality()\n atom.invariom.get_prochirality()" ]
[ "0.69029266", "0.6711919", "0.6711495", "0.6549798", "0.5954817", "0.59225327", "0.57624036", "0.5698024", "0.56348896", "0.5612911", "0.5511534", "0.54864615", "0.5481483", "0.5451472", "0.54416704", "0.542101", "0.5358761", "0.5353916", "0.53418523", "0.53012073", "0.5292634", "0.5275678", "0.52519774", "0.52376693", "0.5203634", "0.51542187", "0.515295", "0.5145658", "0.5110281", "0.50952756" ]
0.78792745
0
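In the row above, cwr is presumably itertools.combinations_with_replacement; under that assumption the illustrative snippet below reproduces the positive document and shows its output sizes — a diploid call over the five symbols A, C, G, T, '-' yields C(5 + 2 - 1, 2) = 15 unordered genotypes.

    from itertools import combinations_with_replacement as cwr  # assumption: the row's 'cwr' is this alias

    def all_genotype(ploidy):
        """All genotypes of the given ploidy over the allele alphabet A, C, G, T, '-'."""
        return ["".join(comb) for comb in cwr("ACGT-", ploidy)]

    print(all_genotype(1))       # ['A', 'C', 'G', 'T', '-']
    print(len(all_genotype(2)))  # 15 == C(5 + 2 - 1, 2) unordered diploid genotypes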
str genotype str base return P(base in genotype)
def prob_t_N(genotype, base): cnter = Counter(genotype) return cnter.get(base, 0) * 1/len(genotype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_label(genotype_type):\n if genotype_type == \"Hom\":\n return 0\n elif genotype_type == \"Het\":\n return 1\n elif genotype_type == \"Hom_alt\":\n return 2", "def all_genotype(ploidy):\n return [\"\".join(comb) for comb in cwr(\"ACGT-\", ploidy)]", "def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)", "def fromgenotype(self):\n\t\tpass", "def base_codes(self):\n bases = []\n\n if self.is_gas_giant:\n bases.append(\"G\")\n if self.is_naval_base:\n bases.append(\"N\")\n if self.is_scout_base:\n bases.append(\"S\")\n if self.is_research_base:\n bases.append(\"R\")\n if self.is_tas:\n bases.append(\"T\")\n if self.is_consulate:\n bases.append(\"I\")\n if self.is_pirate_base:\n bases.append(\"P\")\n\n return \" \".join(bases)", "def genotype(self):\n\t\tgenotype = \"\"\n\t\tfields = vars(self)\n\t\tfor name, field in fields.items():\n\t\t\tif isinstance(field, Pattern):\n\t\t\t\tgenotype += field.genotype()\n\t\t\telse:\n\t\t\t\tgenotype += str(field)\n\t\t\tgenotype += \"\\0\"\n\n\t\treturn genotype", "def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene", "def get_basestrings(self):\n baseStrs = set()\n for x in self.xvals():\n for y in self.yvals():\n p = self.get_plaquette(x, y)\n if p is not None and p.base is not None:\n baseStrs.add(p.base)\n return list(baseStrs)", "def generate_mutation(base):\n\tif base in ['A', 'C', 'G', 'T']:\n\t\tbases = ['A', 'C', 'G', 'T']\n\t\tbases.remove(base)\n\t\treturn np.random.choice(bases)\n\telse:\n\t\traise Exception('base is not a proper DNA nucleotide (ACGT).')", "def test_check_all_default_bases_positional(self, number, base):\n converted = positional.encode(number, base)\n self.assertEqual(positional.decode(converted, base), number)", "def get_ig_name ( base_name ) :\n return base_name + '-GW'", "def define_geotype(x):\n if x['population_km2'] > 2000:\n return 'urban'\n elif x['population_km2'] > 1500:\n return 'suburban 1'\n elif x['population_km2'] > 1000:\n return 'suburban 2'\n elif x['population_km2'] > 500:\n return 'rural 1'\n elif x['population_km2'] > 100:\n return 'rural 2'\n elif x['population_km2'] > 50:\n return 'rural 3'\n elif x['population_km2'] > 10:\n return 'rural 4'\n else:\n return 'rural 5'", "def base_pair(c):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return dna_complement[c.upper()].lower() if c.upper() in dna_complement else 'unknown'", "def gen_pronto_from_raw(seq1, seq2, base=None, freq=None):\n data = gen_pronto_from_raw_int(seq1, seq2, base, freq)\n for value in data:\n yield \"{0:0{1}x}\".format(value, 4)", "def _extract_genotype(geno_field):\n # Assume the genotype is the first format field and raise if it's not\n geno = geno_field.split(':')[0]\n if not GENO_REGEX.search(geno):\n raise ValueError('\"{}\" does not look like a genotype'.format(geno))\n return geno", "def generatebasepairs(self, x):\n currentbases = \"\"\n for u, v in zip(x, range(len(x))):\n if u == 0:\n currentbases += '_'\n else:\n currentbases += self.sequences[v][u-1]\n\n return currentbases", "def create_plaquette(self, baseStr):\n raise NotImplementedError(\"Derived class must implement this.\")", "def get_genotype_from_call(ref_allele, alternate_allele, call):\n genotype = './.'\n if call.lower() == 'undefined' or call.lower() == 'undetermined':\n return genotype\n if call == 'Both':\n 
call = ref_allele + alternate_allele\n callset = set(call)\n if ref_allele in callset and len(callset) == 1:\n genotype = '0/0'\n elif ref_allele in callset and alternate_allele in callset:\n genotype = '0/1'\n callset.remove(ref_allele)\n elif alternate_allele in callset and len(callset) == 1:\n genotype = '1/1'\n else:\n msg = 'Call {call} does not match any of the alleles (ref:{ref_allele}, alt:{alternate_allele})'\n raise ValueError(msg.format(call=call, ref_allele=ref_allele, alternate_allele=alternate_allele))\n return genotype", "def base_repr(i, base):\n\n assert i>=0 and base>=2\n \n if i==0:\n return ['0']\n\n if base<=10:\n return _small_base(i, base)\n\n assert base<=36\n return _large_base(i, base)", "def generate(base):\n if base == '':\n yield base\n else:\n for character in JugglerPassGen.dictionary(base[0]):\n for rest in JugglerPassGen.generate(base[1:]):\n yield character + rest", "def get_primitives(base):\n\n operands = []\n operators = []\n for nparams, s in enumerate(base):\n s = s.replace('%', '%%').split()\n for s in (x.replace('_', ' ') for x in s):\n if nparams and '$' not in s:\n assert nparams in (1, 2)\n s = '%s%s$' % ('$' if nparams == 2 else '', s)\n assert nparams == s.count('$'), (nparams, s)\n s = s.replace('$', ' %s ').strip()\n\n # Normalize the spacing\n s = s.replace(' ,', ',')\n s = s.replace(' . ', '.')\n s = s.replace(' [ ', '[').replace(' ]', ']')\n s = s.replace(' ( ', '(').replace(' )', ')')\n if nparams == 1:\n s = s.replace('+ ', '+')\n s = s.replace('- ', '-')\n s = s.replace('~ ', '~')\n\n if nparams:\n operators.append((s, nparams))\n else:\n operands.append(s)\n return operators, operands", "def complement_base(base,material='DNA'):\n if base in 'Aa':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base in 'TtUu':\n return 'A'\n elif base in 'Gg':\n return 'C'\n else:\n return 'G'", "def lookup_phenotype_by_name( name, greent ):\n logger=logging.getLogger('application')\n #This performs a case-insensitive exact match, and also inverts comma-ed names\n hpo_ids = greent.hpo.search( name )\n if len(hpo_ids) == 0:\n logger.error('Could not convert phenotype name: {}.'.format(name))\n else:\n logger.debug('Found ids for phenotype name: {} {}.'.format(name,' '.join(hpo_ids)))\n return hpo_ids", "def make_presentBase(verb, conj_id):\n if conj_id == 3:\n # For verbs like cupio, statuo\n # the present base is cupi-, statu-\n if verb[0].endswith(\"io\") or verb[0].endswith(\"uo\"):\n b_present = verb[0][:-1]\n else:\n b_present = verb[1][:-3]\n elif conj_id == 4:\n b_present = verb[1][:-2] \n else: \n b_present = verb[1][:-2]\n return b_present", "def design_grna(seq):\n\n transcript = {'A': 'U', 'C': 'G', 'G': 'C', 'T': 'A'}\n grna = \"\".join(transcript[n] for n in seq)\n\n return grna", "def getAnsofBase(length, base):\n ans = 1\n for i in range(length-1):\n ans = ans * base + 1\n return ans", "def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]", "def subspace2proposition(primes: dict, subspace: Union[dict, str]) -> str:\n\n if not subspace or subspace == len(primes) * \"-\":\n return \"TRUE\"\n\n if type(subspace) is str:\n subspace = pyboolnet.state_space.subspace2dict(primes, subspace)\n\n return \"&\".join([name if value == 1 else f\"!{name}\" for name, value in sorted(subspace.items())])", "def getSJMotifCode(startBases, endBases):\n\n motif = (startBases + endBases).upper()\n\n if motif == \"GTAG\":\n return 1\n elif motif == \"CTAC\":\n return 2\n elif motif == \"GCAG\":\n return 3\n elif motif == 
\"CTGC\":\n return 4\n elif motif == \"ATAC\":\n return 5\n elif motif == \"GTAT\":\n return 6\n else:\n return 0", "def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein" ]
[ "0.60589737", "0.5741285", "0.5715102", "0.5583383", "0.5533688", "0.55254024", "0.5489838", "0.5393062", "0.5336175", "0.5331854", "0.5328015", "0.5308519", "0.5293455", "0.5247759", "0.5218406", "0.5206605", "0.5196159", "0.51130825", "0.5110904", "0.50777864", "0.50563097", "0.50279284", "0.50170976", "0.5005599", "0.49523324", "0.49401432", "0.49291328", "0.4924317", "0.49187836", "0.49120113" ]
0.5752146
1
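The positive document above scores P(base | genotype) as the fraction of alleles matching the base — the per-allele term consumed by the likelihood_genotype row that follows. The short usage sketch below is illustrative only, not part of the dataset, and shows the expected values for a triploid genotype.

    from collections import Counter

    def prob_t_N(genotype, base):
        """P(base | genotype): fraction of alleles in the genotype equal to `base`."""
        cnter = Counter(genotype)
        return cnter.get(base, 0) * 1 / len(genotype)

    print(prob_t_N("AAC", "A"))  # 0.666... -> 2 of the 3 alleles are A
    print(prob_t_N("AAC", "G"))  # 0.0      -> G is absent from the genotype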
str genotype iterableobj bases_all_reads, list or np.array return P(data|genotype) == likelihood
def likelihood_genotype(genotype, bases_all_reads, error_rates): likelihood = 1 for observed_base in bases_all_reads: p = 0 for base in "ACGT-": l = prob_t_N(genotype, base) * error_rates[base][observed_base] p += l likelihood *= p return likelihood
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)", "def calculate_genotype_probabilities(self):\n for name, member in self.members.items():\n member.genotype_probabilities = self.genotype_probabilities_of(name)", "def genotypes(self):\n return self.data.genotypes.values", "def __init__(self, length=64, bitstring_or_list=None):\r\n self._fitness = 0\r\n self.repr = None\r\n self.default_length = length\r\n if not bitstring_or_list:\r\n # random array of bytes\r\n self.genes = [random.getrandbits(1)\r\n for _ in range(self.default_length)]\r\n else:\r\n if isinstance(bitstring_or_list, str):\r\n self.genes = [int(b) for b in bitstring_or_list]\r\n elif isinstance(bitstring_or_list, list):\r\n self.genes = bitstring_or_list", "def pval_at_rna_by_nbinom(\n self, pos_dict_of_counts: Mapping[str, List], neg_vals_at_rna: np.array, gene_and_type,\n log_if_values_above=1E9,\n log_values=False, which='per_read',\n verbose=False):\n\n if len(neg_vals_at_rna) == 0:\n return None\n\n log_scale_high_value = (np.mean(neg_vals_at_rna) > log_if_values_above)\n\n if log_values or log_scale_high_value:\n log_this_gene = True\n neg_vals_at_rna = np.log10(neg_vals_at_rna)\n else:\n log_this_gene = False\n \n #if not np.any(neg_vals_at_rna):\n #print(\"No positive values in negatives.\")\n # neg_vals_at_rna = np.array([\n # self.negatives.lowest_positive_vals[which][x]/10 for x in \\\n # self.negatives.metadata.random_proteins])\n #print(f\"negatives now {neg_vals_at_rna}\")\n mean_negative = np.average(neg_vals_at_rna)\n std_negative = np.std(neg_vals_at_rna)\n\n vmr = (std_negative**2)/mean_negative\n\n verbose and print(f'vmr for negatives={vmr}')\n # Use a poisson if the var/mean is low enough:\n if vmr < 2:\n verbose and print(\"Using poisson.\")\n self.stats_log['vmr<2'] += 1\n pois = stats.poisson(mean_negative)\n return self.use_dist(pos_dict_of_counts, log_this_gene, pois)\n\n verbose and print(\"Wil try to use NB.\")\n self.stats_log['vmr>=2'] += 1\n\n # Try to fit a NB useing statsmodels.\n q = sm.NegativeBinomial(\n neg_vals_at_rna, np.array([1] * len(neg_vals_at_rna)), loglike_method='nb2')\n try:\n res = q.fit(disp=0)\n except: # If a NB can't be fit, revert to a poisson.\n print(f\"Could not run q.fit(disp=0) on neg_vals_at_rna= {neg_vals_at_rna}. Using poisson.\")\n pois = stats.poisson(mean_negative)\n return self.use_dist(pos_dict_of_counts, log_this_gene, pois)\n\n # Create a scipy.stats.nbinom object to use its cdf, based on the statsmodels fit parameters.\n # There is no cdf function for the statsmodels object.\n mu = res.predict()[0] # alpha = res.params[1]\n size = 1. 
/ res.params[1] # prob = size / (size + mu)\n\n verbose and print(f\"Fit NB mu={mu}\")\n \n pvals = self.use_dist(\n pos_dict_of_counts, log_this_gene, stats.nbinom(size, size/(size + mu)))\n\n return pvals", "def test_iter_genotypes(self):\n with self.reader_f() as f:\n for g in f.iter_genotypes():\n variant_name = VARIANT_NAME_FIX.get(\n (truth.variant_to_key[g.variant], g.coded),\n truth.variant_to_key[g.variant],\n )\n\n expected = truth.genotypes[variant_name]\n self.assertEqual(expected, g)", "def G_stat(data):\r\n # G = 2*sum(f_i*ln(f_i/f_i_hat)) over all i phenotypes/sample classes\r\n # calculate the total number of observations under the consideration that\r\n # multiple observations in a given group are averaged.\r\n n = sum([arr.mean() for arr in data])\r\n a = len(data) # a is number of phenotypes or sample classes\r\n obs_freqs = array([sample_type.mean() for sample_type in data]) # f_i vals\r\n exp_freqs = zeros(a) + (n / float(a)) # f_i_hat vals\r\n G = 2. * (obs_freqs * log(obs_freqs / exp_freqs)).sum()\r\n return G", "def n(self):\n return len(self.genotypes)", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n for cls in self.classes:\n class_probability = self.prior_prob[cls]\n for key, value in datum.items():\n relative_feature_values = self.likelihoods[cls][key]\n class_probability += math.log(relative_feature_values.get(datum[key], 0.01))\n\n logJoint[cls] = class_probability\n\n return logJoint", "def _genotype_updated(self):\n if self.data.get(\"GT\", None) is None:\n self.gt_alleles = None\n self.called = None\n self.ploidy = None\n else:\n self.gt_alleles = []\n for allele in ALLELE_DELIM.split(str(self.data[\"GT\"])):\n if allele == \".\":\n self.gt_alleles.append(None)\n else:\n self.gt_alleles.append(int(allele))\n self.called = all([al is not None for al in self.gt_alleles])\n self.ploidy = len(self.gt_alleles)", "def prob_t_N(genotype, base):\n cnter = Counter(genotype)\n return cnter.get(base, 0) * 1/len(genotype)", "def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes", "def _get_genotypes(self, samples, records, switch):\n\n variant = np.zeros(len(samples))\n for idx, sample in enumerate(samples):\n try:\n gt = records.genotype(sample)['GT']\n except IndexError:\n print(\"something went wrong with:\")\n print('sample:', sample, 'variant:', records, '-- set value to missing')\n gt = '.'\n if gt == '.':\n gt = 0\n else:\n gt = re.split('\\||/', gt)\n gt = list(map(int, gt))\n variant[idx] = np.sum(gt)\n if switch:\n variant = np.abs(variant - 2)\n return variant", "def test_get_representatives(self):\r\n\r\n result = \"\"\">1: 5\r\nABABABA\r\n>3: 1\r\nBABA\r\n>4: 1\r\nABABAA\r\n>8: 2\r\nBABBA\r\n\"\"\"\r\n seqs = self.data.iteritems\r\n mapping = self.mapping\r\n test_result = list(get_representatives(mapping, seqs()))\r\n test_result_as_fasta = \"\".join(\r\n map(lambda a: a.to_fasta(), test_result))\r\n\r\n self.assertEqual(test_result_as_fasta, result)\r\n\r\n # another example\r\n mapping = {'1': ('a', 'b', 'c'),\r\n '2': ('d', 'e', 'f')}\r\n seqs = [('1', \"ACGT\"), ('2', \"TAGC\"), ('a', \"TTTTT\")]\r\n\r\n observed = list(get_representatives(mapping, seqs))\r\n expected = [BiologicalSequence(\"ACGT\", id=\"1\"),\r\n BiologicalSequence(\"TAGC\", id='2')]\r\n self.assertEqual(observed, expected)", "def likelihood(self):\n \n raise NotImplementedError()", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n 
\"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def treats_data(data, typo='mean'):\n\n nomalized_data = []\n\n if typo == 'mean':\n for i, v in enumerate(data):\n nomalized_data.append( fitness_medio(data[i]) )\n\n if typo == 'gen_mean':\n gens = [0]*len(data[0])\n\n for i, v in enumerate(data):\n for j, v in enumerate(data[i]):\n gens[j] += data[i][j]\n for i,v in enumerate(gens):\n nomalized_data.append( (gens[i])/len(data[0]) )\n\n if typo == 'max':\n for i, v in enumerate(data):\n nomalized_data.append( max(data[i]) )\n\n if typo == 'gen_max':\n gens = [0]*len(data[0])\n\n for i, v in enumerate(data):\n for j, v in enumerate(data[i]):\n if data[i][j] > gens[j]:\n gens[j] = (data[i][j])\n\n for i,v in enumerate(gens):\n nomalized_data.append( (gens[i]) )\n\n return nomalized_data", "def _getPerBaseInfo( self, readGroup ):\r\n\r\n if 'AlignmentArray' not in readGroup:\r\n return None\r\n\r\n alignmentArrayDS = readGroup['AlignmentArray']\r\n dataSize = len(alignmentArrayDS)\r\n \r\n # fetch all to memory for speeding up, it \r\n # requires explicitly slicing coordinate to copy the data \r\n alignmentArray = alignmentArrayDS[0:dataSize] \r\n \r\n ### these are done in numpy, fast,.,\r\n binRBases = (alignmentArray & 0xf0) >> 4; \r\n binTBases = (alignmentArray & 0x0f) ;\r\n rSeqAll = \"\".join(Basemap[binRBases])\r\n tSeqAll = \"\".join(Basemap[binTBases])\r\n\r\n return { \"tSeq\":tSeqAll, \"rSeq\":rSeqAll }", "def viterbi(self, word_seq):\n # Initialize scores\n scores = [{}]\n path = {}\n # Populate scores\n for i in range(0, len(word_seq)):\n for label in self.label_type_map:\n scores[i][label] = 0\n scores.append({})\n self.initialize(scores, word_seq, path)\n path = self.iterate(scores, word_seq, path)\n return self.identify(scores, word_seq, path)", "def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def gw_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number 
of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculates\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'regression model'='normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'genome-wide regression, model: %s' %model,\n [input_snp_filename],\n [output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\nNumber of SNPs: %s\\nNumber of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n\n if model =='normal':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n elif model=='gamma':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n else:\n logging.info('Normal model by default')\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n logging.info(\"Analysis complete\")", "def likelihood(A, B, word_list):\n\tstr_buf = []\n\tloglh = 0.0\n\tlh = 1\n\n\t# read each line and compute likelihood.\n\tfor line in sys.stdin:\n\t\tline = line.split()\n\t\t# print(line)\n\t\tif len(line) == 3:\n\t\t\tstr_buf.append((str(line[0]), str(line[1])))\n\n\t\telse:\n\t\t\t# if come to the end of a sentence\n\t\t\tif len(str_buf) != 0:\n\t\t\t\tstr_buf = [('<s>','BOS')] + str_buf + [('</s>', 'EOS')]\n\t\t\t\tfor i in range(len(str_buf) - 1):\n\t\t\t\t\t# print(str_buf[i][0], str_buf[i+1][0])\n\t\t\t\t\tif str_buf[i+1][0] in word_list:\n\t\t\t\t\t\t# print('debug: A[',str_buf[i][1],'][', str_buf[i+1][1],']:', A[ str_buf[i][1] ][ str_buf[i+1][1] ])\n\t\t\t\t\t\t# print('debug: B[',str_buf[i+1][1],'][', [str_buf[i+1][0]], ']:', B[ str_buf[i+1][1] ][str_buf[i+1][0]])\n\t\t\t\t\t\tloglh += ( log(A[ str_buf[i][1] ][ str_buf[i+1][1] ]) + log(B[ str_buf[i+1][1] ][str_buf[i+1][0]]) )\n\t\t\t\t\telse:\n\t\t\t\t\t\t# print('debug: else A[',str_buf[i][1],'][', str_buf[i+1][1],']:', A[ str_buf[i][1] ][ str_buf[i+1][1] ])\n\t\t\t\t\t\t# print('debug: else B[',str_buf[i+1][1],'][', '<UNK>', ']:', B[ str_buf[i+1][1] ][ '<UNK>' 
])\n\t\t\t\t\t\tloglh += ( log(A[ str_buf[i][1] ][ str_buf[i+1][1] ]) + log(B[ str_buf[i+1][1] ]['<UNK>']) )\n\n\t\t\t\tlh = e ** loglh\n\t\t\t\t# for s in str_buf:\n\t\t\t\t# \tprint(s[0], end=\" \")\n\t\t\t\t# print(':', lh)\n\t\t\t\tprint(lh)\n\n\t\t\t\tstr_buf = []\t\n\t\t\t\tloglh = 0\n\t\t\t\tlh = 1", "def evaluate_iob(predicted, gold, label_field, stats):\n gold_cpu = gold.cpu().numpy()\n pred_cpu = predicted.cpu().numpy()\n gold_cpu = list(gold_cpu.reshape(-1))\n pred_cpu = list(pred_cpu.reshape(-1))\n # pred_cpu = [l for sen in predicted for l in sen]\n\n id2label = {v:k for k,v in label_field.items()}\n # Compute spans for the gold standard and prediction.\n gold_spans = to_spans(gold_cpu, id2label)\n pred_spans = to_spans(pred_cpu, id2label)\n\n # Finally, update the counts for correct, predicted and gold-standard spans.\n compare(gold_spans, pred_spans, stats, 'strict')", "def __init__(self, n, g_bases, g_len, m_prob):\n self.n = n\n self.g_bases = g_bases\n self.g_len = g_len\n self.m_prob = m_prob", "def evaluate ( self , genome ) :\n\n\t\tassert isinstance( genome , Genome ), 'genome supplied must be of type cc3dtools.Genome!'\n\t\tloci = genome.get_mutated_loci()\n\t\tmatched_phenotypes = []\n\t\tphenotypes = self.phenotypes.items()\n\n\t\tfor locus in loci:\n\t\t\tfor phenotype, region in phenotypes:\n\t\t\t\t# check if the locus is in the region\n\t\t\t\t# 'locus.locus' to get the float value of that mutation rather \n\t\t\t\t# than an object!\n\t\t\t\tif locus.locus > region[0] and locus.locus < region[1]:\n\t\t\t\t\tmatched_phenotypes.append( phenotype )\n\t\treturn Counter( matched_phenotypes )", "def _process_genotypes(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'genotype'))\n logger.info(\"building labels for genotypes\")\n geno = Genotype(g)\n fly_tax = 'NCBITaxon:7227'\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (genotype_num, uniquename, description, name) = line\n\n # if self.testMode is True:\n # if int(object_key) not in self.test_keys.get('genotype'):\n # continue\n\n # add the internal genotype to pub mapping\n genotype_id = 'MONARCH:FBgeno'+str(genotype_num)\n self.idhash['genotype'][genotype_num] = genotype_id\n\n if description == '':\n description = None\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if self.testMode and \\\n int(genotype_num) not in \\\n self.test_keys['genotype']:\n continue\n\n model.addIndividualToGraph(\n genotype_id, uniquename,\n Genotype.genoparts['intrinsic_genotype'],\n description)\n # we know all genotypes are in flies\n # FIXME we assume here they are in melanogaster,\n # but that isn't necessarily true!!!\n # TODO should the taxon be == genomic background?\n geno.addTaxon(fly_tax, genotype_id)\n genotype_iid = self._makeInternalIdentifier(\n 'genotype', genotype_num)\n model.addComment(\n genotype_id, genotype_iid)\n if name.strip() != '':\n model.addSynonym(genotype_id, name)\n\n return", "def analyse_loglike(test_data, mods):\r\n l1 = list(map(lambda x: x + ' NB', mods.names))\r\n l1.extend(list(map(lambda x: x + ' ZI', mods.names)))\r\n l1.extend(list(map(lambda x: x + ' P', mods.names)))\r\n loglikeNB = np.array(mods.compute_log_likelihood(test_data, 'NB'))\r\n loglikeZI = np.array(mods.compute_log_likelihood(test_data, 'ZI'))\r\n 
loglikeP = np.array(mods.compute_log_likelihood(test_data, 'P'))\r\n # loglikeG = np.array(mods.compute_log_likelihood_gaussian(test_data))\r\n # loglikegeo = np.array(mods.compute_log_likelihood_geom(test_data))\r\n LL = np.zeros((loglikeNB.shape[0] * 3, loglikeNB.shape[1]))\r\n LL[:loglikeNB.shape[0], :] = loglikeNB\r\n LL[loglikeNB.shape[0]:2 * loglikeNB.shape[0], :] = loglikeZI\r\n LL[2 * loglikeNB.shape[0]:3 * loglikeNB.shape[0], :] = loglikeP\r\n # LL[3 * loglikeNB.shape[0]:4 * loglikeNB.shape[0], :] = loglikeG\r\n # LL[4 * llzi.shape[0]:, :] = np.array(mods.loglikegeo)\r\n print('mean per model', list(zip(np.ma.masked_invalid(LL).sum(axis=1), map(lambda x: x.mod.name, mods.models))))\r\n print('mean per distrib')\r\n print(np.ma.masked_invalid(LL[:loglikeNB.shape[0], :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0]:loglikeNB.shape[0] * 2, :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0] * 2:loglikeNB.shape[0] * 3, :]).mean())\r\n # print(np.nanmean(LL[1-np.isinf(LL)], axis=1))\r\n # print(np.nanmean(LL[LL != np.inf],axis=1))\r\n LL[np.isnan(LL)] = 0\r\n LL[np.isinf(LL)] = 0\r\n LL[LL == 0] = -np.inf\r\n r = np.argmax(LL, axis=0)\r\n # LL /= mx\r\n print('mean_best', np.mean(np.ma.masked_invalid(LL[r, range(LL.shape[1])])))\r\n mx = np.max(LL, axis=0)\r\n LL = LL / mx\r\n means = test_data.get_miniOD(None)[test_data.get_stations_col(None)].mean(axis=0).to_numpy()\r\n # for i in np.unique(r):\r\n # print(means[r == i].max())\r\n print('mean NB', means[r < loglikeNB.shape[0]].mean())\r\n print('mean ZI', means[(r < 2 * loglikeNB.shape[0]) * (r > loglikeNB.shape[0])].mean())\r\n print('mean poisson', means[(r < 3 * loglikeNB.shape[0]) * (r > 2 * loglikeNB.shape[0])].mean())\r\n # print('mean ga', means[(r < 4 * llzi.shape[0]) * (r > 3 * llzi.shape[0])].mean())\r\n # print('mean Gaussian', means[r > 3 * loglikeNB.shape[0]].mean())\r\n print('model name, mean trips per model, LL/maxLL, N inf')\r\n for i in range(LL.shape[0]):\r\n print(l1[i], means[r == i].mean(), np.mean(np.ma.masked_invalid(LL[i, :])), np.sum(np.isinf(LL[i, :])))\r\n print(np.ma.corrcoef(np.ma.masked_invalid(LL[i, :]), means[:LL.shape[1]])[1, 0])\r\n plt.hist(r, bins=np.arange(-0.5, 3 * len(mods.names) + 1, 1))\r\n\r\n # l1.extend(list(map(lambda x: x + ' geo', mods.names)))\r\n # l1.extend(list(map(lambda x: x + ' G', mods.names)))\r\n plt.xticks(range(len(l1)), l1, rotation='vertical')\r\n plt.show()\r\n\r\n for m in mods.loglike:\r\n print(m)\r\n print(m[np.logical_not(np.isinf(m))].mean())", "def likelihoods(self, alleles):\n\n models = self.models_dict[len(alleles)]\n\n F = self.joint_frequencies_combo(alleles)\n\n ### BPH ###\n (((A0, A1),((B0,),)),) = models['BPH'][1].items()\n\n BPH = (A0 / A1) * F[B0]\n\n\n BPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['BPH'][2].items())\n\n if len(alleles)>2:\n BPH += sum( sum(F[B0] * sum( F[B1] * F[B2] for (B1, B2) in C[B0]) for B0 in C) * A0 / A1\n for (A0, A1), C in models['BPH'][3].items())\n\n ### SPH ###\n (((A0, A1),((B0,),)),) = models['SPH'][1].items()\n SPH = (A0 / A1) * F[B0]\n\n SPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['SPH'][2].items())\n\n ### DIPLOIDY ###\n (((A0, A1),((B0,),)),) = models['DISOMY'][1].items()\n DISOMY = (A0 / A1) * F[B0]\n\n DISOMY += sum( sum( F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['DISOMY'][2].items())\n\n ### MONOSOMY ###\n ((B0,),) = models['MONOSOMY'][1][(1,1)]\n MONOSOMY = F[B0]\n\n result = 
likelihoods_tuple(MONOSOMY, DISOMY, SPH, BPH)\n return result", "def get_bases():\n\treturn ((MV.ONE,),) + MV.blades[1:]\n\t# return ((MV.ONE,),) + MV.bases[1:]" ]
[ "0.58311933", "0.5258182", "0.51413745", "0.51239264", "0.50704426", "0.5054357", "0.50419044", "0.50068015", "0.49709237", "0.49687204", "0.49641567", "0.4958727", "0.4943448", "0.49398604", "0.49218807", "0.49215811", "0.4891338", "0.48754093", "0.4873586", "0.48568666", "0.48543763", "0.482769", "0.48221993", "0.48171592", "0.4813334", "0.480879", "0.4791081", "0.47846964", "0.47835147", "0.4782735" ]
0.702063
0
The base exception class of connection exceptions.
def __init__(self, error_msg): super(ConnectionException, self).__init__(error_msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, msg):\n\n super(DBConnectionError, self).__init__(msg)\n self.msg = msg", "def _create_exception(self, pgres=None, msg=None, cursor=None):\n assert pgres is None or cursor is None, \\\n \"cannot specify pgres and cursor together\"\n\n if cursor and cursor._pgres:\n pgres, cursor._pgres = cursor._pgres, ffi.NULL\n\n exc_type = exceptions.OperationalError\n code = pgmsg = None\n # _py_enc can be not initialized yet in case of errors when\n # establishing the connection\n err_enc = self._py_enc or 'utf-8'\n\n # If no custom message is passed then get the message from postgres.\n # If pgres is available then we first try to get the message for the\n # last command, and then the error message for the connection\n if pgres:\n pgmsg = libpq.PQresultErrorMessage(pgres)\n pgmsg = ffi.string(pgmsg).decode(err_enc, 'replace') \\\n if pgmsg else None\n\n # Get the correct exception class based on the error code\n code = libpq.PQresultErrorField(pgres, libpq.LIBPQ_DIAG_SQLSTATE)\n if code != ffi.NULL:\n code = bytes_to_ascii(ffi.string(code))\n exc_type = util.get_exception_for_sqlstate(code)\n else:\n code = None\n exc_type = exceptions.DatabaseError\n\n if not pgmsg:\n pgmsg = libpq.PQerrorMessage(self._pgconn)\n pgmsg = ffi.string(pgmsg).decode(err_enc, 'replace') \\\n if pgmsg else None\n\n if msg is None and pgmsg:\n msg = pgmsg\n for prefix in (\"ERROR: \", \"FATAL: \", \"PANIC: \"):\n if msg.startswith(prefix):\n msg = msg[len(prefix):]\n break\n\n # Clear the connection if the status is CONNECTION_BAD (fatal error)\n if self._pgconn and libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:\n self._closed = 2\n\n exc = exc_type(msg)\n exc.pgcode = code\n exc.pgerror = pgmsg\n exc.cursor = cursor\n exc._pgres = pgres\n\n return exc", "def SocketError(self) -> SocketError:", "def handle_demisto_exception(e):\n if 'Proxy Error' in str(e):\n raise ConnectionError(MESSAGES['PROXY_ERROR'])\n elif 'ReadTimeoutError' in str(e):\n raise ConnectionError(MESSAGES['REQUEST_TIMEOUT'])\n elif 'ConnectionError' in str(e) or 'ConnectTimeoutError' in str(e):\n raise ConnectionError(MESSAGES['CONNECTION_ERROR'])\n elif 'SSLError' in str(e):\n raise SSLError(MESSAGES['SSL_CERT_ERROR'])\n else:\n raise e", "def _raise_unknown_error(ex):\n raise MsticpyKqlConnectionError(\n \"Another exception was returned by the service\",\n *ex.args,\n f\"Full exception:\\n{str(ex)}\",\n title=\"connection failed\",\n )", "def exception(self, e):\n pass", "def exception(self) -> Exception:\n return self._exception", "def exceptionType(self):\n return ExceptionType.GeneralException", "def format_connection_exception(e, driver):\n if adodbapi is not None:\n if isinstance(e, OperationalError) and e.args and isinstance(e.args[0], com_error):\n e_comm = e.args[0]\n hresult = e_comm.hresult\n sub_hresult = None\n internal_message = None\n if e_comm.args and len(e_comm.args) == 4:\n internal_args = e_comm.args[2]\n if len(internal_args) == 6:\n internal_message = internal_args[2]\n sub_hresult = internal_args[5]\n base_message, base_conn_err = _lookup_conn_error_and_msg(hresult, internal_message)\n sub_message, sub_conn_err = _lookup_conn_error_and_msg(sub_hresult, internal_message)\n if internal_message == 'Invalid connection string attribute':\n if base_message and sub_message:\n conn_err = sub_conn_err if sub_conn_err else base_conn_err\n return base_message + \": \" + sub_message, conn_err\n else:\n # else we can return the original exception message + lookup the proper\n # ConnectionErrorCode for this issue\n 
conn_err = sub_conn_err if sub_conn_err else base_conn_err\n return repr(e), conn_err\n else:\n # if not an Operational error, try looking up ConnectionErr type\n # by doing a regex search on the whole exception message\n e_msg = repr(e)\n _, conn_err = _lookup_conn_error_and_msg(0, e_msg)\n return e_msg, conn_err\n\n elif pyodbc is not None:\n e_msg = repr(e)\n _, conn_err = _lookup_conn_error_and_msg(0, e_msg)\n if conn_err == ConnectionErrorCode.driver_not_found:\n installed, drivers = _get_is_odbc_driver_installed(driver)\n if not installed and drivers:\n e_msg += \" configured odbc driver {} not in list of installed drivers: {}\".format(driver, drivers)\n return e_msg, conn_err\n\n return repr(e), ConnectionErrorCode.unknown", "def db_connection_error(error):\n return internal_server_error(error)", "def cancelled_exception_class(cls) -> type[BaseException]:", "def WrappedException(self) -> object:", "def exception(self, *args, **kwargs):", "def clientconnfail(self) :\n\t\ttry :\n\t\t\treturn self._clientconnfail\n\t\texcept Exception as e:\n\t\t\traise e", "def test_exception_class_hierarchy(self) -> None:\n\n try:\n raise CustomDerivedError(state=\"test\")\n except CustomDerivedError as cex:\n assert type(cex) is CustomDerivedError\n assert \"test\" == cex.state\n except CustomError as cex:\n assert False, \"CustomDerivedError should have caught the exception.\"\n except:\n assert False, f\"Unhandled exception: {sys.exc_info()[0]}\"", "def unexpected_error(self, exception):", "def unexpectedException(self):", "def exception(self):\n return self._exception", "def solid_exception(self) -> Optional[BaseException]:\n return self.op_exception", "def exception(self) -> exceptions.ErrorMessageException:\n\n return ErrorMessage.ERROR_CODES_TO_EXCEPTIONS.get(\n self.error_code,\n exceptions.GenericException\n )", "def _get_exception(self):\r\n \r\n return self._exception", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def throw(self):\n pass", "def ConnectByNameError(self) -> _n_0_t_14:", "def what(self):\n return _libSALOME_LifeCycleCORBA.SALOME_Exception_what(self)", "def __init__(self, code, reason):\n super(RequestError, self).__init__(code, reason)", "def systcpconnfail(self) :\n\t\ttry :\n\t\t\treturn self._systcpconnfail\n\t\texcept Exception as e:\n\t\t\traise e", "def __init__(self, content, status):\n Exception.__init__(self)\n self.status = status\n self.content = content", "def connection_lost(self, exc):\n pass", "def __init__(self, message):\n ModelException.__init__(self, message)" ]
[ "0.6604351", "0.6386638", "0.6341203", "0.6303432", "0.62695044", "0.62493557", "0.6177012", "0.6173146", "0.6118006", "0.60822475", "0.60723776", "0.60721236", "0.6066645", "0.60590744", "0.60276735", "0.60103536", "0.59800124", "0.5904314", "0.5867853", "0.58674383", "0.58631456", "0.5806314", "0.5789059", "0.57713217", "0.57563686", "0.57542354", "0.57522684", "0.5751538", "0.5709271", "0.5696378" ]
0.7421417
0
Reloads the Polls file.
def reloadpolls(self, irc, msg, args): try: self.polls = yaml.load(open(self.pollFile, 'r'), Loader=yamlordereddictloader.Loader) except FileNotFoundError as e: log.warning("Couldn't open file: %s" % e) raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reloadfile(self, ):\n self.loadfile()", "def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()", "def reload(self):\n puts('Reloading application...')\n local('touch ../reload.txt')", "def reload(self):\n\n pass", "def reload(self):", "def reload(self):", "def handleReload(self, confInfo=None):", "def reload(self):\n with open(self._config) as f:\n self.data = json.load(f)", "def reload(self):\n self.read(self._cfg_path)", "def reload(self):\n if len(self.files) > 0:\n self.load(self.files, regfiles=self.regions)", "def reload(self):\n if self.filename is not None:\n self.channels.clear()\n try:\n self.open(self.filename)\n except EnvironmentError, e:\n log.warning('ChannelsDictionary.reload failed: %s', e)\n else:\n log.warning('ChannelsDictionary.reload without self.filename.')", "def reload(self, filename = None):\r\n if self.config.get('world', 'autosave'):\r\n self.save()\r\n self.load(filename or self.filename)", "def reload_cookies(self):\n\n if os.path.exists(self.location_of_cookies):\n with open(self.location_of_cookies, 'rb') as f:\n cookies = pickle.load(f)\n self.load_cookies(cookies, self.cookie_domain)\n \n f.close()", "async def poll_refresh(self) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.poll_refresh))", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "async def reload(ctx, name):\n await unload_extension(name, channel=ctx.channel)\n await load_extension(name, channel=ctx.channel)", "def reload_config(self):\n pass", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def load(self, filepath=''):\n sleep(20)\n pass", "def refresh(self, btn=None):\n\n if self.visible and self.filename:\n self.load_file(self.filename, self.title)", "def reload(self):\n self.nextId = 0\n self.users.clear()\n self._nameCache.clear()\n self._hostmaskCache.clear()\n if self.filename is not None:\n try:\n self.open(self.filename)\n except EnvironmentError, e:\n log.warning('UsersDictionary.reload failed: %s', e)\n else:\n log.error('UsersDictionary.reload called with no filename.')", "def reload(self) -> None: # pragma: no cover\n raise NotImplementedError()", "async def giveaway_reload(self, ctx):\n self._load_games()\n await ctx.send(\n f\"Reloaded list of games ({len(self.steam_keys)} games)\")", "def refresh(self, list_of_tables):\n self.dismod_file.refresh(list_of_tables)", "def refresh(self, url, args, cancellationSignal):\n pass", "def receive_reload_request(self, _: EmptyMsg):\n self.update()", "def trigger_reloading(self) -> None:\n self.trigger_signal(\"reloading\")", "def reload(self):\n self.rpc.call(MsfRpcMethod.CoreReloadModules)", "def reload_info(self):\n self.__loop.run_until_complete(self.__reload_info())", "def reload( self ):\n\t\tCORE.info( 'Reloading resources: modules, categories' )\n\t\tmoduleManager.load()\n\t\tcategoryManager.load()\n\t\tRESOURCES.info( 'Reloading UCR variables' )\n\t\tucr.load()" ]
[ "0.74811375", "0.6804164", "0.6666194", "0.66235316", "0.65775543", "0.65775543", "0.65237594", "0.64763457", "0.6454685", "0.6289295", "0.62368816", "0.6080593", "0.6055079", "0.60324496", "0.5997774", "0.5995909", "0.59953433", "0.5965536", "0.5963405", "0.59284127", "0.5891991", "0.5854808", "0.5853909", "0.5841499", "0.5803716", "0.5801305", "0.5791334", "0.57649046", "0.57626754", "0.5754108" ]
0.7334716
1
[channel] Vote on a poll. Channel is only needed if used in a PM.
def vote(self, irc, msg, args, channel, pid, yaynay): if yaynay not in ['yay', 'nay']: irc.error("Valid Answers are 'yay' or 'nay'.") return if channel in self.polls.keys(): if self.polls[channel][pid]['concluded']: irc.reply("Poll #%s is finished, it does not accept updates." % pid) return if self._vote(irc, channel, msg.nick, pid, yaynay): irc.reply("Successfully voted on %s" % self.polls[channel][pid]['question']) else: log.debug('Not dumping due to no change.') else: irc.error("'%s' has no polls." % channel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def votes(self, irc, msg, args, channel, pid):\n if channel and msg.args[0] in irc.state.channels:\n if msg.args[0] != channel:\n if ircdb.checkCapability(msg.prefix, 'admin') or ircdb.checkCapability(msg.prefix, 'owner'):\n irc.error(\"Not Implemented\")\n else:\n irc.errorInvalid('argument', channel)\n elif msg.args[0] == channel:\n irc.error(\"Not Implemented\")", "async def _vote_count(\n self, ctx: Context, *, channel: discord.TextChannel = None\n ):\n\n guild: discord.Guild = ctx.guild\n\n if not channel:\n channel = await self.get_vote_channel(guild)\n if isinstance(channel, str):\n return await ctx.send(channel)\n\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"I couldn't identify a voting channel. Please specify one explicitly.\"\n ))\n else:\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"That channel has too many messages!\"\n \" Please ask a host for manual vote count.\"\n ))\n\n if len(history) < 1:\n return await ctx.send(_(\"{} is empty.\").format(channel.mention))\n\n user_votes = {}\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for message in history:\n author = message.author\n if player_role not in author.roles:\n continue\n vote = self.get_vote_from_message(message)\n if not vote:\n continue\n user_votes[f\"{author.name}#{author.discriminator}\"] = vote\n\n user_votes = await self.get_non_voters(guild, user_votes)\n\n votes = {}\n for user in user_votes:\n val = user_votes[user].capitalize()\n try:\n votes[val].append(user)\n except KeyError:\n votes[val] = [user]\n\n # max votes first\n votes = dict(sorted(\n votes.items(), key=lambda item: len(item[1]), reverse=True\n ))\n\n # Pop and add stuff back to dict for ordering purpose.\n try:\n votes[\"VTNL\"] = votes.pop(\"Vtnl\")\n except KeyError:\n pass\n try:\n votes[\"No vote\"] = votes.pop(\"No vote\")\n except KeyError:\n pass\n\n txt = \"\"\n\n for i, vote in enumerate(votes, start=1):\n voters = votes[vote]\n\n if vote == \"VTNL\":\n txt += _(\"\\n\\n**{}** - {} ({})\").format(vote, len(voters), \", \".join(voters))\n elif vote == \"No vote\":\n txt += _(\"\\n\\n**Not voting** - {} ({})\").format(len(voters), \", \".join(voters))\n else:\n txt += _(\"\\n{}. 
**{}** - {} ({})\").format(i, vote, len(voters), \", \".join(voters))\n\n title = _(\"Vote Count\")\n\n embed = discord.Embed(\n color=0x00CDFF, title=title,\n description=_(\"__Counting from {} channel.__\\n\\n{}\").format(\n channel.mention, txt.strip()\n )\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\n f\"**{title}**\\n\\n__Counting from {channel.mention}\"\n f\" channel.__\\n\\n{txt.strip()}\"\n )", "def receive_poll_answer(self, update, context):\n answer = update.poll_answer\n poll_id = answer.poll_id\n selected_options = answer.option_ids\n\n timeout = self.DEFAULT_DELETE_TIMEOUT\n vote_count = self.DEFAULT_VOTE_COUNT\n chat_data = get_chat(context.bot_data[poll_id][\"chat\"])\n if chat_data:\n timeout = chat_data.delete_timeout or self.DEFAULT_DELETE_TIMEOUT\n vote_count = chat_data.vote_count or self.DEFAULT_VOTE_COUNT\n\n if len(selected_options) < 1:\n context.bot_data[poll_id][\"count\"][\n context.bot_data[poll_id][\"voters\"][update.effective_user.id]\n ] -= 1\n return\n\n if selected_options[0] == 0:\n context.bot_data[poll_id][\"count\"][\"yes\"] += 1\n context.bot_data[poll_id][\"voters\"][update.effective_user.id] = \"yes\"\n elif selected_options[0] == 1:\n context.bot_data[poll_id][\"count\"][\"no\"] += 1\n context.bot_data[poll_id][\"voters\"][update.effective_user.id] = \"no\"\n\n # Close poll after three participants voted\n if (\n context.bot_data[poll_id][\"count\"][\"yes\"] == vote_count\n or context.bot_data[poll_id][\"count\"][\"no\"] == vote_count\n ):\n context.bot.stop_poll(\n context.bot_data[poll_id][\"chat\"],\n context.bot_data[poll_id][\"message_id\"],\n )\n if timeout == -2:\n return\n context.job_queue.run_once(\n self.sched_delete,\n timeout,\n context=(\n context.bot_data[poll_id][\"chat\"],\n context.bot_data[poll_id][\"message_id\"],\n ),\n )", "async def _msgvote_on(self, ctx):\n\n channel = ctx.message.channel\n if channel.id in self.settings[\"channels_enabled\"]:\n await self.bot.say(\"Msgvote mode is already on in this channel.\")\n else:\n self.settings[\"channels_enabled\"].append(channel.id)\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Msgvote mode is now on in this channel.\")", "async def vote(self, ctx):\n embed = discord.Embed(title = \"Here are some bot lists that you can vote for me on, voters may soon™ recieve perks\", color = discord.Color.blurple())\n embed.add_field(name = \"Bots For Discord\", value = \"[Click Here](https://botsfordiscord.com/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Discord Boats\", value = \"[Click Here](https://discord.boats/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Divine Discord Bots\", value = \"[Click Here](https://divinediscordbots.com/bot/592811241756688405/vote)\") \n embed.add_field(name = \"Botlist.space\", value = \"[Click Here](https://botlist.space/bot/592811241756688405/upvote)\") \n embed.set_thumbnail(url = self.bot.user.avatar_url)\n await ctx.send(embed = embed)", "async def get_vote_channel(self, guild: discord.Guild):\n\n vote_channels = [\n ch for ch in guild.channels\n if \"voting\" in ch.name\n or \"vote\" in ch.name\n ]\n\n if len(vote_channels) < 1:\n return _(\n \"I couldn't identify a voting channel.\"\n \" Please specify one explicitly.\"\n )\n\n if len(vote_channels) > 1:\n # get channel with the largest suffixed number\n return max(\n vote_channels, key=lambda obj: int(obj.name.split(\"-\")[1])\n )\n\n else:\n return vote_channels[0]", "def receive_poll_answer(update: 
Update, context: CallbackContext) -> None:\n answer = update.poll_answer\n poll_id = answer.poll_id\n try:\n questions = context.bot_data[poll_id][\"questions\"]\n # this means this poll answer update is from an old poll, we can't do our answering then\n except KeyError:\n return\n selected_options = answer.option_ids\n answer_string = \"\"\n for question_id in selected_options:\n if question_id != selected_options[-1]:\n answer_string += questions[question_id] + \" and \"\n else:\n answer_string += questions[question_id]\n context.bot.send_message(\n context.bot_data[poll_id][\"chat_id\"],\n f\"{update.effective_user.mention_html()} feels {answer_string}!\",\n parse_mode=ParseMode.HTML,\n )\n context.bot_data[poll_id][\"answers\"] += 1\n # Close poll after three participants voted\n if context.bot_data[poll_id][\"answers\"] == 3:\n context.bot.stop_poll(\n context.bot_data[poll_id][\"chat_id\"], context.bot_data[poll_id][\"message_id\"]\n )", "def poll(update: Update, context: CallbackContext) -> None:\n questions = [\"Good\", \"Really good\", \"Fantastic\", \"Great\"]\n message = context.bot.send_poll(\n update.effective_chat.id,\n \"How are you?\",\n questions,\n is_anonymous=False,\n allows_multiple_answers=True,\n )\n # Save some info about the poll the bot_data for later use in receive_poll_answer\n payload = {\n message.poll.id: {\n \"questions\": questions,\n \"message_id\": message.message_id,\n \"chat_id\": update.effective_chat.id,\n \"answers\": 0,\n }\n }\n context.bot_data.update(payload)", "def receive_poll(self, update: Update, context: CallbackContext) -> None:\r\n actual_poll = update.effective_message.poll\r\n # Only need to set the question and options, since all other parameters don't matter for\r\n # a closed poll\r\n update.effective_message.reply_poll(\r\n question=actual_poll.question,\r\n options=[o.text for o in actual_poll.options],\r\n # with is_closed true, the poll/quiz is immediately closed\r\n is_closed=True,\r\n reply_markup=ReplyKeyboardRemove(),\r\n )", "def receive_poll(update: Update, context: CallbackContext) -> None:\n actual_poll = update.effective_message.poll\n # Only need to set the question and options, since all other parameters don't matter for\n # a closed poll\n update.effective_message.reply_poll(\n question=actual_poll.question,\n options=[o.text for o in actual_poll.options],\n # with is_closed true, the poll/quiz is immediately closed\n is_closed=True,\n reply_markup=ReplyKeyboardRemove(),\n )", "def vote(self, part_key, choice):\n part_data = self.get_participant(part_key)\n poll_key = part_data['poll']\n poll_data = self.get_poll(poll_key)\n num_choices = len(poll_data['choices'])\n if(choice not in range(num_choices)):\n raise Exception('Invalid choice value ' + choice +\n ' provided to model.vote()')\n part_data['choice'] = choice\n part_data['voted'] = True\n self.set_participant(part_key, part_data)\n # TODO: Remove the following log notification\n print ('Participant ' + part_data['email'] + ' voted for ' +\n poll_data['choices'][part_data['choice']] + '.')\n return part_data", "def vote_for_poll(request, question_id):\n question = Question.objects.get(pk=question_id)\n if not(q.can_vote()):\n messages.error(request, \"poll expires\")\n return redirect('polls:index')\n return render(request, \"polls/details.html\", {\"question\": question})", "async def vote_setup(ctx: commands.Context):\n session = session_maker()\n old_channel = session.query(Channel).filter_by(channel_id=ctx.channel.id).one_or_none()\n if old_channel is not 
None:\n await ctx.send('This channel is already setup.')\n return\n channel = Channel(server_id=ctx.guild.id, channel_id=ctx.channel.id)\n session.add(channel)\n session.commit()\n await ctx.send(f'{ctx.channel} set up for voting!')", "def __init__(self, poll, question, options, total_votes, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.poll = poll\n self.question = question\n self.options = options\n self.total_votes = total_votes", "async def vps(self, ctx):\n await ctx.send(\"https://discordapp.com/channels/566451504332931073/566451504903618561/662484243808780309\")", "def cmd_comment_vote(client, args):\n comment_vote = client.comment_vote(args.comment_id, args.vote)\n generate_output({'comment_vote': comment_vote})", "async def vote(ctx: commands.Context):\n await ctx.send(\"this isn't a command\")", "async def letVote(self, ctx):\n timeLeft = 300\n msgStart = await self.textChannel.send(\n self.roleForPlayer.mention + \" \\nDès maintenant les votes sont pris en compte. Votez parmis :```\" +\n \"``````\".join(\n self.getMembersName()) + \"```en écrivant un des pseudos ci-dessus en **_message privé_**.\\nÉvitez\"\n \" de trop spammer si vous ne voulez pas que le décompte soit trop \"\n \"long.\\nN'oubliez pas que vous ne pouvez pas voter pour vous même.\\n\"\n \"Vous avez \" + str(timeLeft) + \" secondes pour voter.\")\n for player in self.playersAndRoles:\n await player.user.send(\"Votez ici parmis :```\" + \"``````\".join(player.getMembersName()) +\n \"```Seul le dernier pseudo valide sera pris en compte.\")\n\n await asyncio.sleep(timeLeft - 30)\n await self.textChannel.send(\"Plus que 30s.\")\n await asyncio.sleep(30)\n msgEnd = await self.textChannel.send(\"Le décompte est terminé, obtention des votes ...\")\n votes = await self.getVote(msgStart=msgStart, msgEnd=msgEnd)\n await self.applyVote(votes=votes)\n await self.displayCourseOfTheGame()\n\n await self.textChannel.send(\"Fin de la partie. 
Suppression du channel dans 2 minutes.\")\n await asyncio.sleep(120)\n await self.endGame(ctx=ctx)", "async def votechannel_list(self, ctx):\n channels = await self.bot.db.execute(\n \"\"\"\n SELECT channel_id, voting_type FROM voting_channel WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n )\n if not channels:\n raise exceptions.Info(\"There are no voting channels on this server yet!\")\n\n rows = []\n for channel_id, voting_type in channels:\n rows.append(f\"<#{channel_id}> - `{voting_type}`\")\n\n content = discord.Embed(\n title=f\":1234: Voting channels in {ctx.guild.name}\", color=int(\"3b88c3\", 16)\n )\n await util.send_as_pages(ctx, content, rows)", "def poll(self, poll_input):", "def upvote_comment():\n x = upvoteBot.main(number=1, submissionid='7xbrcw')\n return x", "async def vote_clear(ctx: commands.Context):\n session = session_maker()\n old_channel = session.query(Channel).filter_by(channel_id=ctx.channel.id).one_or_none()\n if old_channel is None:\n await ctx.send('This channel was never setup for votes.')\n return\n old_votes = session.query(Vote).filter_by(channel_id=ctx.channel.id).all()\n for old_vote in old_votes:\n session.delete(old_vote)\n session.commit()\n await ctx.send(f'Votes for {ctx.channel} cleared!')", "async def channel(self, ctx):\n pass", "async def _msgvote_off(self, ctx):\n\n channel = ctx.message.channel\n if channel.id not in self.settings[\"channels_enabled\"]:\n await self.bot.say(\"Msgvote mode is already off in this channel.\")\n else:\n self.settings[\"channels_enabled\"].remove(channel.id)\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Msgvote mode is now off in this channel.\")", "def abstimmungen(self, irc, msg, args):\n if self._is_voting_enabled(irc, msg, reply=True):\n channel = msg.args[0]\n users = irc.state.channels[channel].users\n voting_timeout = int(self.registryValue(\"voting_timeout\"))\n\n votes = []\n for voting_id in self.running_votes:\n voting = self.running_votes[voting_id]\n votes.append(\"[ Abstimmung gegen %s (%d von %d Stimmen) noch %d Sekunden ]\" % (\n voting.target,\n voting.count_votes(users),\n voting.threshold,\n voting.remaining_time(voting_timeout)))\n if votes:\n irc.reply(\", \".join(votes))\n else:\n irc.reply(\"Momentan laufen keine Abstimmungen.\")", "async def vote_unsetup(ctx: commands.Context):\n session = session_maker()\n old_channel = session.query(Channel).filter_by(channel_id=ctx.channel.id).one_or_none()\n if old_channel is None:\n await ctx.send('This channel was never setup for votes.')\n return\n session.delete(old_channel)\n session.commit()\n await vote_clear(ctx)\n await ctx.send(f'{ctx.channel} no longer open for voting.')", "def update_vote(self, vote):\n enemy = Enemy(vote.target, history={}).update_hostility(hostility=4, message=vote)\n self.update_enemy(enemy)", "def slot_poll(self, _sender, _data):\r\n if self.client.secret and self.client.secret.know_secret():\r\n # poll recent own trades\r\n # fixme: how do i do this, whats the api for this?\r\n pass", "def up_vote(cls, user, message):\r\n pass", "def do_initiate(bot, msg, **kwargs):\n channel = kwargs.get('event').get('channel')\n instructions = textwrap.dedent(\n '''\n :cop:I am *{name}*, your election police.\n\n \n :grey_question:*How to Vote:*\n Voting in here is simple. Each candidate's profile is listed with a white-on-green checkmark beneath their profile. All you have to do is *click the checkmark once* for your preferred candidate.\n\n\n :warning:*Rules*:\n 1. *Only your first vote counts*. 
Regardless of the count on checkmark, only your first vote is valid and recorded. Subsequent votes or attemps to remove already cast ballots would be ignored.\n\n 2. *Do not try to post any messages in this channel* as such messages would be deleted immediately.\n\n Now...\n > _Be Nice, Be Respectful, Be Civil_ :simple_smile:\n\n\n I will now list the candidates. Happy Voting :simple_smile:\n > One more thing: _You can vote for yourself._\n\n '''.format(name=bot.username)\n )\n\n # Clear channel\n bot.clear_channel(channel)\n \n print 'Begin Inviting...'\n if 'DEBUG' in dir(bot.config) or 'TESTING' in dir(bot.config):\n print 'test invites'\n # for userid in bot.masters.values():\n # bot.invite_user_to_channel(channel, userid)\n else:\n for member in bot.team_members:\n bot.invite_user_to_channel(channel, member.get('id'))\n print 'End Inviting...'\n\n # Set channel topic\n bot.set_channel_topic(bot.stats.get(channel).get('topic'), channel)\n # Show instructions\n instruction_response = bot.post_msg(text=instructions, channel_name_or_id=channel)\n # Set channel purpose\n bot.set_channel_purpose(bot.stats.get(channel).get('purpose'), channel)\n # Pin message to channel\n bot.pin_msg_to_channel(channel, instruction_response.get('ts'))\n\n help_response = do_help(bot, **kwargs)\n bot.pin_msg_to_channel(channel, help_response.get('ts'))\n\n # Add candidates for this office\n for userid, data in bot.stats.get(channel).get('candidates').iteritems():\n bot.add_candidate(userid, channel)\n bot.vote_for(userid, channel)\n #bot.update_live_stats(channel)\n\n live_stats = bot.get_stats(channel)\n if live_stats is not None:\n response = bot.post_msg(\n text=live_stats,\n channel_name_or_id=channel\n )\n bot.stats.get(channel)['live_ts'] = response.get('ts')\n bot.db.session.query(bot.db.Office).filter_by(channel=channel).first().live_ts=response.get('ts')\n\n response = bot.post_msg(\n text='*NO ONGOING ELECTIONS IN THIS CHANNEL*',\n channel_name_or_id=channel\n )\n bot.stats.get(channel)['election_status_ts'] = response.get('ts')\n bot.db.session.query(bot.db.Office).filter_by(channel=channel).first().election_status_ts=response.get('ts')\n bot.stats.get(channel)['election_status'] = False\n bot.db.session.query(bot.db.Office).filter_by(channel=channel).first().election_status= False\n bot.db.session.commit()\n\n bot.log_msg('Channel{} prepared for voting.'.format(channel), channel)\n \n return True\n #return Response(bot.about)" ]
[ "0.6393847", "0.6147241", "0.59954494", "0.5947859", "0.5874632", "0.5840181", "0.5776968", "0.5758922", "0.5722969", "0.57202333", "0.5576358", "0.5565325", "0.5534625", "0.548925", "0.54667646", "0.5447865", "0.539419", "0.53278154", "0.53092563", "0.53039765", "0.528682", "0.5217865", "0.5210522", "0.51699525", "0.5149859", "0.51340574", "0.5121524", "0.5089862", "0.50854814", "0.5067571" ]
0.6149552
1
List waiters within the given configuration.
def ListWaiters(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def waiters(self):\n waiters = []\n\n for name, item in self._definition.get('waiters', {}).items():\n name = self._get_name('waiter', Waiter.PREFIX + name)\n waiters.append(Waiter(name, item))\n\n return waiters", "def get_list_of_configlets(configlets):\n\n futures_list = []\n results = []\n\n with ThreadPoolExecutor(max_workers=40) as executor:\n for configlet in configlets:\n futures = executor.submit(clnt.api.get_configlet_by_name, configlet)\n futures_list.append(futures)\n\n for future in futures_list:\n try:\n result = future.result(timeout=60)\n results.append(result)\n except Exception:\n results.append(None)\n return results", "def cronWaitingList(self, date):\n match = {\"task_type\": \"crontab\", \"task_day\": date, \"status\": \"waiting\"}\n l = []\n for doc in self.search(match):\n l.append(doc)\n return l", "def get_waiting_jobs(self):\n return []", "def list():\n # Calling config file\n cf = config.ReadFile(config_file)\n user = cf[\"authentication\"][\"user\"]\n\n l = []\n for job in cron:\n l.append(job)\n return l", "def watch_list(self) -> list:\n return []", "def create_checkers(config):\n\n checkers = []\n if 'checkers' in config:\n for checker_name, checker_config in config['checkers'].iteritems():\n if checker_name in __checkers:\n configs = None\n if type(checker_config) == list:\n configs = checker_config\n else:\n configs = [checker_config]\n for config in configs:\n ch = __checkers[checker_name]()\n ch.set_config(config)\n if ch:\n checkers.append(ch)\n return checkers", "def waitables(self):\n return (), (), ()", "def get_config(config):\n return ['-deadlock'] if 'ignore deadlock' in config else []", "def list(self):\n self.background_scheduler.print_jobs()", "def _ls_waiting_jobs(self):\n \n jobs = [j for j in os.listdir(pjoin(self._jobsdir, \"00_waiting\")) if j.endswith(self._job_ext)]\n \n if self._job_filter:\n jobs = [j for j in jobs if self._job_filter(pjoin(self._jobsdir, \"00_waiting\", j), j)]\n \n return jobs", "def wait_on_cluster_conditions(cluster, waiters):\n results = []\n start = datetime.datetime.now()\n while waiters:\n new_waiters = []\n for waiter in waiters:\n type = waiter.get(\"type\")\n name = waiter.get(\"name\")\n timeout = waiter.get(\"timeout\", 1800) # 30 minutes\n expiry = waiter.get(\"expiry\")\n namespace = waiter.get(\"namespace\", \"default\")\n if timeout:\n if not expiry:\n waiter[\"expiry\"] = start + \\\n datetime.timedelta(seconds=timeout)\n if datetime.datetime.now() > waiter[\"expiry\"]:\n waiters = []\n waiter.pop('expiry')\n return None, f\"Waiter: {waiter} expired on cluster: {cluster.id}\" # noqa\n if type == \"ingress\":\n ingress = cluster.ctl.get_ingress(\n name=name, namespace=namespace)\n ips = ingress.get(\"ips\")\n hostnames = ingress.get(\"hostnames\")\n if ips or hostnames:\n waiter.update({\"result\": ingress})\n waiter.pop(\"expiry\", None)\n results.append(waiter)\n else:\n new_waiters.append(waiter)\n waiters = new_waiters\n sleep(5)\n return results, None", "def get_binners(config):\n binners = []\n if config[\"binning\"][\"metabat\"]:\n binners.append(\"metabat\")\n if config[\"binning\"][\"concoct\"]:\n binners.append(\"concoct\")\n if config[\"binning\"][\"maxbin\"]:\n binners.append(\"maxbin\")\n return binners", "def wait_for_workers(self):\r\n stop = False\r\n workers = self.aggregator.get_participants()\r\n\r\n while not stop: \r\n try:\r\n with self.aggregator:\r\n resp = self.aggregator.receive(1)\r\n participant = resp.notification['participant']\r\n workers.append(participant)\r\n 
print('Task %s: participant %s has joined' % (self.task_name, participant))\r\n except Exception as err:\r\n print(\"Task %s: joined %d participants out of %d\" % (self.task_name, len(workers), self.Nworkers))\r\n #print(err)\r\n #print('Check here: error')\r\n #import code\r\n #code.interact(local=locals())\r\n pass\r\n\r\n if len(workers) == self.Nworkers:\r\n stop = True\r\n\r\n workers = self.aggregator.get_participants()\r\n return list(workers.keys())", "def list(self, config_path: str, results_filter: Optional[ObjectType]) -> List[str]:\n ...", "def list(self):\n for item in self._config:\n item.list()", "def PingWorkers(config, wait_time = None):\n if wait_time:\n wait_time = int(wait_time)\n if not config.HasCommandChannels():\n raise ConfigException(\"No URL found for sending command messages. Update \"\n \"your cluster configuration.\")\n for node in Worker.PingWorkers(config.command_sender,\n config.command_response_receiver, wait_time):\n print \" \".join(map(str, node))", "def waiting_clients(self):\n return self.storage.iterkeys()", "def list_configurations(configurationType=None, filters=None, maxResults=None, nextToken=None, orderBy=None):\n pass", "async def list_tasks():", "def _config_list(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n lines = []\n for config in res['configs']:\n line = '* ' if config['current'] else ' '\n\n if ctx.verbose:\n line += config['mtime'] + ' '\n\n line += config['name']\n lines.append(line)\n\n return \"\\n\".join(lines)", "def download_listings(self):\n ExecConfigMethod(\n self.api_session, channel_id=self.channel_id, source=self.source,\n property_name='DownloadListings', function_name='DownloadListings')", "def watch_deployment_config_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_deployment_config_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/watch/deploymentconfigs'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return 
response", "def list_configurations(MaxResults=None, NextToken=None):\n pass", "def _get_monitor_tasks(self, desired_config):\n create_monitors = list()\n delete_monitors = list()\n update_monitors = list()\n\n for hm_type in ['http', 'https', 'tcp', 'icmp', 'udp']:\n existing = self._bigip.get_monitors(hm_type)\n config_key = \"{}_monitors\".format(hm_type)\n desired = desired_config.get(config_key, dict())\n\n (create_hm, update_hm, delete_hm) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n create_monitors += create_hm\n update_monitors += update_hm\n delete_monitors += delete_hm\n\n return (create_monitors, update_monitors, delete_monitors)", "def cmd_list_resources(config=DEFAULT_LINUX_PATH):\n config = load_config_file(expand_config_path(config))\n px = connection_proxmox(config[\"proxmox\"])\n try:\n if config[\"pools\"]:\n l, h = list_resources(px, config[\"pools\"])\n return tabulate(l, h)\n else:\n print(\"Dick 'pools' is empty\")\n except KeyError:\n print(\"Missing 'pools' dict in config file\")\n sys.exit(1)", "def wait_children(self, timeout=None):\n self.join_children(timeout=timeout)\n return [x.get() for x in self.children]", "def get_job_listings(self):\r\n\r\n for attempt in range(5):\r\n try:\r\n job_listings = WebDriverWait(self.driver, 8).until(\r\n EC.presence_of_all_elements_located((By.XPATH, '//li[@class=\"jobs-search-results__list-item occludable-update p0 relative ember-view\"]')))\r\n except Exception as e:\r\n print('An error occurred: ', e)\r\n driver.refresh()\r\n else:\r\n job_results = self.driver.find_element_by_xpath('//small[@class=\"display-flex t-12 t-black--light t-normal\"]')\r\n job_results_num = str(job_results.text).split()[0].replace(',', '')\r\n first_page_url = self.driver.current_url\r\n\r\n for job in job_listings:\r\n self.driver.implicitly_wait(5)\r\n mouse = ActionChains(self.driver).move_to_element(job)\r\n mouse.perform()\r\n self.apply_to_job(job)\r\n\r\n if int(job_results_num) > 24:\r\n time.sleep(2)\r\n all_pages = self.driver.find_element_by_xpath('//li[@class=\"artdeco-pagination__indicator artdeco-pagination__indicator--number ember-view\"]')\r\n last_page = all_pages[len(all_pages)-1].text\r\n\r\n last_page_int = int(re.sub(r'[^/d]', '', last_page)) # Replace any character except the blank space with \"\"\r\n get_last_page = self.driver.find_element_by_xpath(\"//button[@aria-label='Page \"+str(total_pages_int)+\"']\")\r\n get_last_page.send_keys(Keys.RETURN)\r\n last_page_url = self.driver.current_url\r\n total_jobs = int(last_page.split('start=', 1)[1])\r\n\r\n # Go through all pages and apply\r\n for page in range(25, last_page_int):\r\n self.driver.get(first_page_url + '&start=' + str(page))\r\n time.sleep(3)\r\n for attempt in range(5):\r\n try:\r\n new_job_listings = WebDriverWait(self.driver, 8).until(\r\n EC.presence_of_all_elements_located((By.XPATH, '//li[@class=\"jobs-search-results__list-item occludable-update p0 relative ember-view\"]')))\r\n except Exception as e:\r\n print('An error occurred: ', e)\r\n driver.refresh()\r\n else:\r\n for new_job in new_job_listings:\r\n self.driver.implicitly_wait(5)\r\n mouse_new = ActionChains(self.driver).move_to_element(new_job)\r\n mouse_new.perform()\r\n self.apply_to_job(new_job)\r\n else:\r\n print('You have applied to all jobs available. 
Closing program...')\r\n time.sleep(3)\r\n self.driver.quit()", "def test_list_config_nodes(self):\n with self.override_role():\n self.config_client.list_config_nodes()", "def list_notebook_instance_lifecycle_configs(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None):\n pass" ]
[ "0.71054786", "0.6426477", "0.586981", "0.57586163", "0.5726431", "0.5626258", "0.55491465", "0.5529439", "0.5473015", "0.54290795", "0.5414866", "0.53946763", "0.53753406", "0.536865", "0.5325333", "0.5261703", "0.5254371", "0.5234681", "0.52307975", "0.51577234", "0.5136059", "0.51083416", "0.5097008", "0.5076341", "0.5075224", "0.50708354", "0.5069733", "0.5036469", "0.5027848", "0.5006232" ]
0.6771325
1
Save seed into temp file.
def saveseed(self, seed): savefile = gettempdir() + '/last_test_seed_fate.tmp' if args.verbose: print('Saving run into ' + savefile) with open(savefile, 'w') as f: f.write(str(seed))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_seed(self, seed: np.ndarray):\n print(\"Reconstructed trace saved as seed to \", CONFIG_DIR)\n np.savetxt(CONFIG_DIR / self.name_seed, seed.view(float).reshape(-1, 2))", "def insert_test( hash, random, seq ):\n try:\n with open(os.path.join(SEED_DIRECTORY, \"%s_%s\" % (hash, 0)), \"w+\") as f:\n record_used('seeds', hash)\n pickle.dump({'hash': hash, 'random': random, 'seq': seq }, f)\n except IOError:\n if not os.environ.get('CALIENDO_TEST_SUITE', None):\n logger.warning( \"Failed to open %s\" % hash)", "def local_seed(self) -> str:\n assert self.definition.settings.sp_root_dir\n seed_file = self.definition.settings.sp_root_dir.joinpath(\"seed.txt\")\n if not seed_file.exists():\n seed = str(encode_hex(bytes(random.randint(0, 255) for _ in range(20))))\n seed_file.write_text(seed)\n else:\n seed = seed_file.read_text().strip()\n return seed", "def seed():", "async def save(self, job, options=None):\n if options is None:\n options = {}\n\n if not options.get('secretseed'):\n bundle = False\n filename = '/data/freenas-v1.db'\n else:\n bundle = True\n filename = tempfile.mkstemp()[1]\n os.chmod(filename, 0o600)\n with tarfile.open(filename, 'w') as tar:\n tar.add('/data/freenas-v1.db', arcname='freenas-v1.db')\n tar.add('/data/pwenc_secret', arcname='pwenc_secret')\n\n def read_write():\n with open(filename, 'rb') as f:\n f2 = os.fdopen(job.write_fd, 'wb')\n while True:\n read = f.read(1024)\n if read == b'':\n break\n f2.write(read)\n f2.close()\n await self.middleware.run_in_thread(read_write)\n\n if bundle:\n os.remove(filename)", "def save(self, filename='test'):\n file = open(filename+'.txt','w')\n pickle.dump(self, file)\n file.close()", "def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()", "def save_tmp_file(self, data):\n with open(self.tmp_file, 'wb') as f:\n f.write(data)", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def save(self):\n\t\tself.CONFIG.save()\n\t\tself.temp_files.save()", "def save(self):\n\n pattern = '{}_{}_{}ep.pt' if self.checkpoint_filename_pattern is None else self.checkpoint_filename_pattern\n filename = pattern.format('sherlock1', time.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n self.monitors['loss_train'].num_epochs)\n full_filename = self.full_path(filename)\n c = {\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'monitors': self.monitors,\n 'parent': self.parent,\n 'args': vars(args) # convert args to dict\n }\n torch.save(c, full_filename)\n if not args.tuning and args.delete and self.last_checkpoint is not None:\n os.remove(self.last_checkpoint)\n self.last_checkpoint = full_filename\n return filename", "def temporary(cls):\n fh, path = tempfile.mkstemp(suffix='.hdf5')\n os.close(fh)\n self = cls(path, 'w')\n self.path = path\n return self", "def persist(self, filepath):\n joblib.dump('hello-steppy', filepath)", "def seed():\n pass", "def seed():\n pass", "def save(self):\n if self.hasChanged:\n filePath = 
self.path\n tempPath = filePath+'.tmp'\n fileDir = os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False", "def save_checkpoint(self, filename='checkpoint.pth'):\n torch.save(self.state_dict(), filename)", "def _save(self, filename = str(int(time()))):\n if filename:\n with open(filename, 'w') as f:\n f.write('null')\n self.prompt_time = 0\n exit()", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def gen_int(filename):\n random.seed()\n random.randint(-100,100)\n with open(filename, \"w\") as f:\n for i in range(1000):\n f.write(str(random.randint(-100,100)))\n f.write(\" \")\n # f.write(\"hello\")", "def temp_dump(self, session_id):\n f = open(pathlib.Path(basedir).joinpath('static', 'temp', session_id, 'hero_pickle_storage.json'), 'w')\n stored_info = jsonpickle.encode(self)\n f.write(stored_info)\n f.close()", "def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def save(self, filename=\"fitter.pickle\"):\n\n with open(filename, \"wb\") as outfile:\n pickle.dump(self, outfile)", "def _generate_to_tempfile(self, generator):\r\n (output_fd, output_path) = tempfile.mkstemp()\r\n with os.fdopen(output_fd, 'w') as output:\r\n generator.write(output)\r\n return output_path", "def dump_to_tmpfile(obj):\n\n import tempfile\n\n fname = tempfile.mktemp()\n with open(fname, \"w\") as txtfile:\n txtfile.write(str(obj))\n\n print(\"str(obj) was written to {}\".format(fname))\n\n return fname", "def save_checkpoint(state, filename):\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint", "def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)", "def update_seed_parameters(parameters, samples):\n\n with open(\"../../output/seed.tmp\", \"w\") as f:\n f.write(f\"{parameters[0]+parameters[1]}\\n\")\n f.write(f\"{samples}\")", "def test_to_file(self):\n fd, fp = mkstemp()\n close(fd)\n st = SampleTemplate.create(self.metadata, self.new_study)\n st.to_file(fp)\n self._clean_up_files.append(fp)\n with open(fp, 'U') as f:\n obs = f.read()\n self.assertEqual(obs, EXP_SAMPLE_TEMPLATE)\n\n fd, fp = mkstemp()\n close(fd)\n st.to_file(fp, {'2.Sample1', '2.Sample3'})\n self._clean_up_files.append(fp)\n\n with open(fp, 'U') as f:\n obs = f.read()\n self.assertEqual(obs, EXP_SAMPLE_TEMPLATE_FEWER_SAMPLES)" ]
[ "0.71522933", "0.6273968", "0.62532884", "0.6251843", "0.604025", "0.60345274", "0.60236645", "0.6002649", "0.5998057", "0.5997992", "0.5983726", "0.5970545", "0.5964728", "0.5925001", "0.5925001", "0.5902035", "0.5890514", "0.58809185", "0.5874178", "0.58705467", "0.5869121", "0.58676714", "0.58513814", "0.5847211", "0.5844433", "0.5839203", "0.58270246", "0.58026654", "0.5786167", "0.577891" ]
0.8186387
0
Returns an HTML script element for including a script from the admin media url (or other location if an absolute url is given).
def include_admin_script(script_path): if not absolute_url_re.match(script_path): script_path = '%s%s' % (settings.ADMIN_MEDIA_PREFIX, script_path) return '<script type="text/javascript" src="%s"></script>' % script_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n <script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", { \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)", "def propeller_javascript_url():\n return javascript_url()", "def get_vendor_js():\n return (\"://plotly-load_from_python.js\",)", "def replacement(self):\n assert (self.src or self.inline) and not (self.src and self.inline)\n if self.src:\n return '<script async type=\"text/javascript\" src=\"%s\"></script>' % urllib.quote(self.src)\n else:\n return '<script>\\n%s\\n</script>' % self.inline", "def load_script(browser, url):\r\n if browser.current_url.startswith('file:'):\r\n url = 'https:' + url\r\n browser.execute_script(\"\"\"\r\n var script_tag = document.createElement(\"script\");\r\n script_tag.setAttribute(\"type\", \"text/javascript\");\r\n script_tag.setAttribute(\"src\", arguments[0]);\r\n document.getElementsByTagName(\"head\")[0].appendChild(script_tag);\r\n \"\"\", url)", "def _load_snippet(filename) -> str:\n fullpath = f'{dirname(__file__)}/js/{filename}'\n file = open(fullpath, 'r')\n script = file.read()\n file.close()\n return script", "def inline_javascript(html_src, path=None):\n javascript_re = re.compile(\"\\<script src\\=\\\"([0-9a-zA-Z./]+)\\\"\\>\\</script>\")\n\n def fetch_jssource(in_match):\n rel_path = in_match.group(1)\n jspath = os.path.join(path, rel_path)\n return \"<script>\\n{0}\\n</script>\".format(open(jspath, 'r').read())\n\n return javascript_re.sub(fetch_jssource, html_src)", "def audio_file_player(self):\n if self.audio_file:\n file_url = settings.MEDIA_URL + str(self.content)\n player_string = '<audio src=\"%s\" controls>Your browser does not support the audio element.</audio>' % (file_url)\n return player_string", "def amp_url(self):\n return self.url.child(\"amp\")", "def get_media_js(self):\n media_js = uniquify_sequence(self.media_js + self.plugin_media_js)\n\n return media_js", "def get_embed_url(self):\n return self.embed_url", "def get_embed_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://cdn.livestream.com/embed/%s?layout=4&amp;clip=%s' % (self.get_username(), self.get_video_id())", "def topcoat_icons_script_tag():\n return u'<script type=\"text/javascript src=\"%s\"></script>' % topcoat_icons_script_url()", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return self.original_url\n \n return 'https://www.slideshare.net/slideshow/embed_code/%s' % self.get_video_id()", "def get_url(self,urldata):\n return \"%s?%s\" % (self.script_url, urllib.urlencode(urldata,1))", "def get_vendor_js(cls):\n return (\n vendor_static_dependencies[\"cesiumjs\"].get_custom_version_url(\n url_type=\"js\", version=cls.cesium_version\n ),\n )", "def getBaseURL():\n return getQualifiedURL(getScriptname())", "def get_embed_url(self):\n if not self.original_url:\n return ''\n \n return 'https://vine.co/v/%s/embed/simple' % (self.get_video_id())", "def js(filepath):\n return static_file(filepath, root=\"public\")", "def _get_scripts_resource(pe):\n return next(\n (\n entry.directory.entries[0].directory.entries[0]\n for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries\n if entry.name and entry.name.string == b\"PYTHONSCRIPT\"\n ),\n None,\n 
)", "def url(self, url):\n prefix = self.request_local.environ['toscawidgets.prefix']\n script_name = self.request_local.environ['SCRIPT_NAME']\n if hasattr(url, 'url_mapping'):\n url = url.url_mapping['normal']\n return ''.join([script_name, prefix, url])", "def core_cdn_file(request, source):\n\n file_path = settings.CENTIPAIR_TEMPLATE_DIR + \"/cdn/\" + source\n source_file_url = settings.TEMPLATE_STATIC_URL + \"/\" + file_path\n return source_file_url", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return self.original_url\n \n return 'http://www.slideshare.net/slideshow/embed_code/%s' % self.get_video_id()", "def driver(self):\n return '<static-vmedia>'", "def third_party_scripts(request):\n return {\n 'ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE':\n settings.ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE\n }", "def mediaplayer(src,width=400,height=250):\n return XML('<embed allowfullscreen=\"true\" allowscriptaccess=\"always\" flashvars=\"height=%(height)s&width=%(width)s&file=%(src)s\" height=\"%(height)spx\" src=\"%(url)s\" width=\"%(width)spx\"></embed>'%dict(url=URL('static','plugin_wiki/mediaplayer.swf'),src=src,width=width,height=height))", "def client_plugin_source(self, language):\n\n static = self.static\n if static is None:\n return None\n\n filename = os.path.join(static, \"main.\" + language)\n realfilename = os.path.realpath(filename)\n\n if not realfilename.startswith(self.static + '/'): # pragma: no cover\n raise ValueError(\"Invalid language `%s`\" % language)\n\n if not os.path.isfile(realfilename):\n return None\n\n return realfilename", "def bootstrap_javascript_url():\n return javascript_url()", "def get_embed_url(self):\n raise NotImplementedError(\"Subclass must implement abstract method get_embed_url\")", "def render_external(plugin, **kwargs):\n\n html = oembed_html(plugin.url)\n if 'youtube.com' in html:\n return mark_safe(\n '<div class=\"flex-video widescreen\">{}</div>'.format(html))\n if 'vimeo.com' in html:\n return mark_safe(\n '<div class=\"flex-video widescreen vimeo\">{}</div>'.format(html))\n return mark_safe(html)" ]
[ "0.5999079", "0.56655055", "0.5653992", "0.56134677", "0.5572652", "0.5382267", "0.5336885", "0.5328502", "0.53220886", "0.53072685", "0.5276165", "0.52621883", "0.5245122", "0.5236014", "0.523167", "0.5201591", "0.51624304", "0.51335704", "0.51318634", "0.51224566", "0.5115419", "0.5115169", "0.5107584", "0.51004356", "0.5084249", "0.50680596", "0.5040471", "0.5032476", "0.5027374", "0.5023635" ]
0.7652485
0
The index view, for the home page. Shows Campaigns this UserProfile is in.
def index(request): context = dict() if request.user.is_authenticated(): context['campaigns'] = [ CampaignSerializer(c).serialize() for c in request.user.userprofile.campaigns.order_by('pk')] return render(request, 'voter_validation/index.html', context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(request): \n \n all_projects = models.Project.objects.all()\n projects = get_objects_for_user(request.user, 'view_project', all_projects)\n \n fbads_settings = FacebookAdsSettings.objects.first()\n return render_to_response('index.html',{\n 'projects': projects, \n 'fbads_settings': fbads_settings},\n context_instance=RequestContext(request))", "def index(request):\n\n LOGGER.debug('socialprofile_demo.views.index')\n\n response_data = {}\n\n return render_to_response('index.html', response_data, context_instance=RequestContext(request))", "def index(request):\n return render(request, 'commissioned_sites/index.html',\n {'sites': CommissionedSite.objects.all().order_by('-date')},\n context_instance=RequestContext(request))", "def index(page):\r\n per_page = 24\r\n count = cached_users.get_total_users()\r\n accounts = cached_users.get_users_page(page, per_page)\r\n if not accounts and page != 1:\r\n abort(404)\r\n pagination = Pagination(page, per_page, count)\r\n if current_user.is_authenticated():\r\n user_id = current_user.id\r\n else:\r\n user_id = 'anonymous'\r\n top_users = cached_users.get_leaderboard(current_app.config['LEADERBOARD'],\r\n user_id)\r\n return render_template('account/index.html', accounts=accounts,\r\n total=count,\r\n top_users=top_users,\r\n title=\"Community\", pagination=pagination)", "def index(request):\n users = User.objects.filter(is_staff=False, is_active=True).order_by('username')\n return render(request, 'users/view_all_users.html',\n { 'users': users })", "def index():\r\n if current_user.is_authenticated():\r\n user_id = current_user.id\r\n else:\r\n user_id = 'anonymous'\r\n top_users = cached_users.get_leaderboard(current_app.config['LEADERBOARD'],\r\n user_id=user_id)\r\n\r\n return render_template('/stats/index.html', title=\"Community Leaderboard\",\r\n top_users=top_users)", "def index(request):\n home_user = request.user.profile\n \"\"\"num_activities = Activity.objects.count()\"\"\"\n Cactivity = CompletedActivity.objects.filter(user=home_user)\n UActivity = Cactivity.values('activity_id', 'activity__name', 'activity__value', 'activity__group').annotate \\\n (count=Count('activity__name'), earned=Sum('activity__value'))\n TimesCompelted = Cactivity.annotate(count=Count('activity__name'))\n # Generate counts of some of the main objects\n\n context = {\n 'huser': home_user,\n 'Lname' : home_user.user.last_name,\n 'Fname': home_user.user.first_name,\n 'num_activities': 1,\n 'activity_list' : UActivity,\n \"times_completed\" : TimesCompelted\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context=context)", "def index(request):\n\t# Generate counts of some of the main objects\n\tnum_customers = Customers.objects.all().count()\n\tnum_instructors = Instructors.objects.all().count()\n\tnum_membership_plans = MembershipPlans.objects.count() # The 'all()' is implied by default.\n\n\tselectInstructors = SelectInstructors();\n\tjoinQuery = JoinQuery();\n\taggregationQuery = AggregationQuery();\n\tdivisionQuery = DivisionQuery();\n\tnestedAggregationQuery = NestedAggregationQuery();\n\tdeleteOperationCascade = DeleteOperationCascade();\n\tdeleteOperation = DeleteOperation();\n\tupdateNumberOfPeople = UpdateNumberOfPeople();\n\t# Render the HTML template index.html with the data in the context variable\n\treturn 
render(\n\t\trequest,\n\t\t'index.html',\n\t\tcontext={'num_membership_plans':num_membership_plans,'num_instructors':num_instructors,'num_customers':num_customers,'select_instructors':selectInstructors,'join_query':joinQuery,'aggregation_query':aggregationQuery,'division_query':divisionQuery,'nested_aggregation_query':nestedAggregationQuery,'delete_operation_cascade':deleteOperationCascade, 'delete_operation': deleteOperation, 'update_number_of_people': updateNumberOfPeople},\n\t)", "def index():\n user, user_id = get_user()\n # Get this user's course with their roles\n my_courses = []\n if user:\n my_courses = user.get_courses()\n # Get all public courses\n public_courses = Course.get_public()\n\n return render_template('courses/index.html',\n user=user,\n my_courses=my_courses,\n public_courses=public_courses)", "def index(request):\n params = get_user_profile_params(request)\n\n competition = Competition.get_active()\n params['top_competition_id'] = competition.id\n params['minify_js'] = settings.MINIFY_JS\n\n params['first_page_text'] = ''\n config = Config.objects.all()\n if config.count() > 0:\n params['first_page_text'] = config[0].first_page_text\n\n #order email test\n #order = Order.objects.get(pk=25)\n #send_order_email(order.email, order, order.items.all)\n\n return render(request, 'base.html', params)", "def index(request, extra_context={}, user=AnonymousUser()):\r\n\r\n # The course selection work is done in courseware.courses.\r\n domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False\r\n # do explicit check, because domain=None is valid\r\n if domain is False:\r\n domain = request.META.get('HTTP_HOST')\r\n\r\n courses = get_courses(user, domain=domain)\r\n courses = sort_by_announcement(courses)\r\n\r\n context = {'courses': courses}\r\n\r\n context.update(extra_context)\r\n return render_to_response('index.html', context)", "def index(request):\n if request.user.is_authenticated:\n return redirect('/dashboard')\n else:\n context = {'client_id': settings.OPENHUMANS_CLIENT_ID,\n 'oh_proj_page': settings.OH_ACTIVITY_PAGE}\n\n return render(request, 'main/index.html', context=context)", "def index():\n user_list = Users.query.all()\n return render_template('users/index.html'\n ,user_list=user_list\n ,t=t\n ,m=m)", "def index(request):\n # Generate counts of some of the main objects\n num_orgs=Organization.objects.count()\n num_contacts=Contact.objects.count()\n # Get the number of Volunteers\n num_volunteers=ContactTypeTag.objects.filter(tag_type__exact='vo').count()\n num_projects=Project.objects.count() # The 'all()' is implied by default.\n \n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'index.html',\n context={\n 'num_orgs':num_orgs,\n 'num_contacts':num_contacts,\n 'num_volunteers':num_volunteers,\n 'num_projects':num_projects,\n },\n )", "def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def index(request):\n if request.user is None:\n return view_all(request, index_call=True)\n else:\n return mine(request)", "def index(request):\n\n dests = Destination.objects.all()\n\n return render(request,'index.html',{'dests': dests})", "def my_index(request):\n try:\n denied = models.ParticipantStatus.objects.get(codename=models.ParticipantStatus.DENIED)\n except:\n denied = -1\n\n competitions_im_creator_of = Competition.objects.filter(creator=request.user).order_by('-pk').select_related(\n 
'creator').annotate(num_participants=Count('participants'))\n competitions_im_admin_of = Competition.objects.exclude(pk__in=[c.pk for c in competitions_im_creator_of]).filter(\n admins__in=[request.user]).order_by('-pk').select_related('creator').annotate(num_participants=Count('participants'))\n my_competitions = list(competitions_im_creator_of) + list(competitions_im_admin_of)\n\n # Invalid select related previously\n published_competitions = models.Competition.objects.filter(published=True).select_related('creator').annotate(num_participants=Count('participants'))\n published_competitions = reversed(sorted(published_competitions, key=lambda c: c.get_start_date))\n context_dict = {\n 'my_competitions': my_competitions,\n # Invalid select related previously\n 'competitions_im_in': list(request.user.participation.all().exclude(status=denied)),\n 'published_competitions': published_competitions,\n }\n return render(request, \"web/my/index.html\", context_dict)", "def contest_won_viewall(request):\n is_loggedin, username = get_session_variables(request)\n contest_list = Contest_won.objects.all()\n\n if contest_list:\t\n contest_participant_list = []\n for contest_won_obj in contest_list:\t\n c_id = contest_won_obj.contest_id\n c_p_objs = Contest_won_participant.objects. \\\n filter(contest_id = c_id)\n contest_participant_list.extend(c_p_objs)\n\n return render_to_response('achievement/contest_viewall.html', \\\n {'is_loggedin':is_loggedin, \\\n 'username':username, \\\n 'contest_list':contest_list, \\\n 'contest_participant_list':contest_participant_list}, \\\n RequestContext(request))\n else:\n return render_to_response('achievement/noview.html', \\\n {'is_loggedin':is_loggedin, \\\n 'username':username, \\\n 'type': 'Contest\\'s won'}, \\\n RequestContext(request))", "def all_accounts(request):\n accounts = Account.objects.all()\n return render(request, 'app/home.html', {'accounts': accounts})", "def get(self):\n query = Campaign.query\n return paginate(Campaign.__tablename__, query, self.schema), HTTPStatus.OK", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def all_users(request):\n # order users by last name\n users = UserProfile.objects.all().order_by('last_name')\n return render(request, \"allusers.html\", {'users': users})", "def show_campaigns(request, utm_campaign, **kwargs):\n \n err_msg = ''\n try:\n err_msg = str(kwargs['kwargs']['err_msg'])\n except:\n pass\n \n test_type_override = ''\n try:\n test_type_override = MySQLdb._mysql.escape_string(request.POST['test_type_override'])\n \n if test_type_override == 'Banner':\n test_type_var = FDH._TESTTYPE_BANNER_\n elif test_type_override == 'Landing Page':\n test_type_var = FDH._TESTTYPE_LP_\n elif test_type_override == 'Banner and LP':\n test_type_var = FDH._TESTTYPE_BANNER_LP_\n \n except:\n test_type_var = ''\n pass\n \n try:\n \"\"\" Find the earliest and latest page views for a given campaign \"\"\"\n lptl = DL.LandingPageTableLoader()\n ccrml = DL.CiviCRMLoader()\n \n start_time = ccrml.get_earliest_donation(utm_campaign)\n end_time = ccrml.get_latest_donation(utm_campaign)\n \n one_step = lptl.is_one_step(start_time, end_time, utm_campaign) \n \n if not(one_step): \n start_time = lptl.get_earliest_campaign_view(utm_campaign)\n end_time = lptl.get_latest_campaign_view(utm_campaign) \n\n interval = 1\n \n \"\"\" Create reporting object to retrieve campaign data and write plots to 
image repo on disk \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n \n \"\"\" Produce analysis on the campaign view data \"\"\" \n ir.run(start_time, end_time, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" \n ESTIMATE THE START AND END TIME OF THE CAMPAIGN\n ===============================================\n \n Search for the first instance when more than 10 views are observed over a sampling period\n \"\"\"\n \n col_names = ir._data_loader_.get_column_names()\n \n views_index = col_names.index('views')\n ts_index = col_names.index('ts')\n \n row_list = list(ir._data_loader_._results_) # copy the query results\n for row in row_list:\n if row[views_index] > 100:\n start_time_est = row[ts_index]\n break\n row_list.reverse()\n for row in row_list:\n if row[views_index] > 100:\n end_time_est = row[ts_index]\n break\n \n \n \"\"\"\n BUILD THE VISUALIZATION FOR THE TEST VIEWS OF THIS CAMAPAIGN\n ============================================================ \n \"\"\"\n \n \"\"\" Read the test name \"\"\"\n ttl = DL.TestTableLoader()\n row = ttl.get_test_row(utm_campaign)\n test_name = ttl.get_test_field(row ,'test_name')\n \n \"\"\" Regenerate the data using the estimated start and end times \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n ir.run(start_time_est, end_time_est, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" Determine the type of test (if not overridden) and retrieve the artifacts \"\"\"\n test_type, artifact_name_list = FDH.get_test_type(utm_campaign, start_time, end_time, DL.CampaignReportingLoader(query_type=''), test_type_var)\n \n return render_to_response('campaigns/show_campaigns.html', {'utm_campaign' : utm_campaign, 'test_name' : test_name, 'start_time' : start_time_est, 'end_time' : end_time_est, 'one_step' : one_step, \\\n 'artifacts' : artifact_name_list, 'test_type' : test_type, 'err_msg' : err_msg}, context_instance=RequestContext(request)) \n\n except Exception as inst:\n \n logging.error('Failed to correctly produce campaign diagnostics.')\n logging.error(type(inst))\n logging.error(inst.args)\n logging.error(inst)\n \n \"\"\" Return to the index page with an error \"\"\"\n err_msg = 'There is insufficient data to analyze this campaign: %s. Check to see if the <a href=\"/LML/\">impressions have been loaded</a>. 
<br><br>ERROR:<br><br>%s' % (utm_campaign, inst.__str__())\n \n return index(request, kwargs={'err_msg' : err_msg})", "def view(args):\n if args.available:\n printAvailableCampaigns()\n if args.search_help:\n print(getSearchQueryHelp())", "def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def index(request):\n\n chats = Chat.objects.all().order_by('-created_at')\n\n if request.user.is_authenticated():\n chats = chats.filter(friend_groups__in=request.user.get_profile().\\\n friend_groups.all().values_list('id'))\n else:\n chats = chats.filter(friend_groups__isnull=True)\n\n return render_to_response('index.html', {\n 'chats': chats[:10],\n }, context_instance=RequestContext(request))", "def dashboard(request):\r\n profile = get_object_or_404(Profile, user=request.user)\r\n wallet = Wallet.objects.get(user=request.user)\r\n history = History.objects.get(pk=1)\r\n referrals = Referral.objects.filter(referee=request.user).count()\r\n invoices = Invoice.objects.filter(issuer=request.user).count()\r\n return render(request, 'coin/dashboard.html', {'profile': profile, \r\n 'wallet': wallet, 'history': history, 'referrals': referrals, \r\n 'invoices': invoices})", "def my_dashboard(request):\n #Get the associated contact for our user\n user_con = request.user.contact\n qs_proj_assoc, qs_task_assoc = get_tiered_upcoming(user_con)\n\n #Get the projects associated with the user\n user_proj_table = table_proj.ProjectAssocAjaxTable(qs_proj_assoc)\n #Get the tasks associated with the user\n user_task_table = table_task.TaskAssocAjaxTable(qs_task_assoc)\n\n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'my_dashboard.html',\n context={\n 'user_con':user_con,\n 'user_proj_table':user_proj_table,\n 'user_task_table':user_task_table,\n 'project_source' : 'data-dashboard-project-upcoming',\n 'task_source' : 'data-dashboard-task-upcoming',\n 'input_id' : user_con.pk,\n 'print_url':reverse_lazy('my-dashboard-print'),\n },\n )", "def test_admin_sms_campaign_view_list(self):\n response = self.client.get('/admin/sms_module/smscampaign/')\n self.failUnlessEqual(response.status_code, 200)" ]
[ "0.6278449", "0.5947191", "0.57573825", "0.57182586", "0.5707867", "0.5703257", "0.56390655", "0.5605963", "0.55317235", "0.5515906", "0.5514333", "0.5508371", "0.54833335", "0.5471401", "0.5455781", "0.54364294", "0.5406502", "0.5397928", "0.53924406", "0.53866136", "0.5347947", "0.5315827", "0.5313521", "0.52813923", "0.5277998", "0.5271437", "0.5270981", "0.52542007", "0.5239706", "0.52305603" ]
0.6828237
0
Shows validation UI for a given campaign, if this UserProfile is authorized to do data entry for the specified Campaign. This is also the endpoint for searching for Voters as part of validation. If doing a search, assume that a sufficient number of the specified fields is present (taken care of in frontend form validation).
def validate(request, campaign_id): if not request.user.userprofile.in_campaign(campaign_id): return HttpResponseRedirect(reverse("voter_validation:index")) campaign_id = int(campaign_id) campaign = get_object_or_404(Campaign, id=campaign_id) # Get the number of signatures validated by the current user for this # campaign, and also for the past 24 hours. val_sigs_set = ValidationRecord.objects.filter( validator=request.user.userprofile, campaign=campaign) val_sigs_24h = val_sigs_set.filter( last_updated__gte=datetime.now(SERVER_TIME_ZONE) - timedelta(hours=24)) context = { "campaign_name": campaign.name, "campaign_id": campaign_id, "val_sigs": val_sigs_set.count(), "val_sigs_24h": val_sigs_24h.count(), } # Search if specified in POST search = request.POST.get("search", "false") if search.lower() == "true": name = request.POST.get("name", None) address = request.POST.get("address", None) res_zip = request.POST.get("zip", None) # Pass in campaign_id so we can check the Voter was previously validated voters = voter_search(name, address, res_zip, campaign_id=campaign_id) context.update({ "name": name, "address": address, "zip": res_zip, "results": voters, }) return render(request, "voter_validation/validation.html", context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(request):\n context = dict()\n if request.user.is_authenticated():\n context['campaigns'] = [\n CampaignSerializer(c).serialize() for c in\n request.user.userprofile.campaigns.order_by('pk')]\n return render(request, 'voter_validation/index.html', context)", "def view(args):\n if args.available:\n printAvailableCampaigns()\n if args.search_help:\n print(getSearchQueryHelp())", "def validate(self, data):\n company = data['company']\n if self.context['request'].user.has_perm(\"view_company\", company):\n return data\n else:\n raise PermissionDenied()", "def show_campaigns(request, utm_campaign, **kwargs):\n \n err_msg = ''\n try:\n err_msg = str(kwargs['kwargs']['err_msg'])\n except:\n pass\n \n test_type_override = ''\n try:\n test_type_override = MySQLdb._mysql.escape_string(request.POST['test_type_override'])\n \n if test_type_override == 'Banner':\n test_type_var = FDH._TESTTYPE_BANNER_\n elif test_type_override == 'Landing Page':\n test_type_var = FDH._TESTTYPE_LP_\n elif test_type_override == 'Banner and LP':\n test_type_var = FDH._TESTTYPE_BANNER_LP_\n \n except:\n test_type_var = ''\n pass\n \n try:\n \"\"\" Find the earliest and latest page views for a given campaign \"\"\"\n lptl = DL.LandingPageTableLoader()\n ccrml = DL.CiviCRMLoader()\n \n start_time = ccrml.get_earliest_donation(utm_campaign)\n end_time = ccrml.get_latest_donation(utm_campaign)\n \n one_step = lptl.is_one_step(start_time, end_time, utm_campaign) \n \n if not(one_step): \n start_time = lptl.get_earliest_campaign_view(utm_campaign)\n end_time = lptl.get_latest_campaign_view(utm_campaign) \n\n interval = 1\n \n \"\"\" Create reporting object to retrieve campaign data and write plots to image repo on disk \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n \n \"\"\" Produce analysis on the campaign view data \"\"\" \n ir.run(start_time, end_time, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" \n ESTIMATE THE START AND END TIME OF THE CAMPAIGN\n ===============================================\n \n Search for the first instance when more than 10 views are observed over a sampling period\n \"\"\"\n \n col_names = ir._data_loader_.get_column_names()\n \n views_index = col_names.index('views')\n ts_index = col_names.index('ts')\n \n row_list = list(ir._data_loader_._results_) # copy the query results\n for row in row_list:\n if row[views_index] > 100:\n start_time_est = row[ts_index]\n break\n row_list.reverse()\n for row in row_list:\n if row[views_index] > 100:\n end_time_est = row[ts_index]\n break\n \n \n \"\"\"\n BUILD THE VISUALIZATION FOR THE TEST VIEWS OF THIS CAMAPAIGN\n ============================================================ \n \"\"\"\n \n \"\"\" Read the test name \"\"\"\n ttl = DL.TestTableLoader()\n row = ttl.get_test_row(utm_campaign)\n test_name = ttl.get_test_field(row ,'test_name')\n \n \"\"\" Regenerate the data using the estimated start and end times \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n ir.run(start_time_est, end_time_est, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" Determine the type of test (if not overridden) and retrieve the artifacts \"\"\"\n test_type, artifact_name_list = FDH.get_test_type(utm_campaign, start_time, end_time, 
DL.CampaignReportingLoader(query_type=''), test_type_var)\n \n return render_to_response('campaigns/show_campaigns.html', {'utm_campaign' : utm_campaign, 'test_name' : test_name, 'start_time' : start_time_est, 'end_time' : end_time_est, 'one_step' : one_step, \\\n 'artifacts' : artifact_name_list, 'test_type' : test_type, 'err_msg' : err_msg}, context_instance=RequestContext(request)) \n\n except Exception as inst:\n \n logging.error('Failed to correctly produce campaign diagnostics.')\n logging.error(type(inst))\n logging.error(inst.args)\n logging.error(inst)\n \n \"\"\" Return to the index page with an error \"\"\"\n err_msg = 'There is insufficient data to analyze this campaign: %s. Check to see if the <a href=\"/LML/\">impressions have been loaded</a>. <br><br>ERROR:<br><br>%s' % (utm_campaign, inst.__str__())\n \n return index(request, kwargs={'err_msg' : err_msg})", "def validate(self, data):\n company = data['company']\n invoice = data.get(\"invoice\")\n if not self.context['request'].user.has_perm(\"view_company\", company) or not all(\n self.context['request'].user.has_perm(\"view_media\", media) for media in invoice):\n raise PermissionDenied()\n return data", "def validate(self, data):\n company = data['company']\n invoice = data.get(\"invoice\")\n if not self.context['request'].user.has_perm(\"view_company\", company) or not all(\n self.context['request'].user.has_perm(\"view_media\", media) for media in invoice):\n raise PermissionDenied()\n return data", "def campground_checker_view(request):\n # If POST request, retrieve data from API\n if request.method == 'POST':\n form = forms.CampgroundForm(request.POST)\n if form.is_valid():\n start_date = form.cleaned_data['start_date']\n end_date = form.cleaned_data['end_date']\n camp_ids = form.cleaned_data['camp_ids']\n camp_id_list = camp_ids.split()\n try:\n results, start_string, end_string = check.master_scraping_routine(camp_id_list, start_date, end_date)\n return render(request, 'availability_results.html', {'start_date': start_string,\n 'end_date': end_string,\n 'results': results})\n except:\n return render(request, 'no_results_found.html')\n else:\n return 'No success'\n # If GET or other type of request, load empty form\n else:\n form = forms.CampgroundForm()\n return render(request, 'availability.html', {'form': form})", "def validate_schema(self, data, **kwargs):\n if \"role\" not in data and \"visible\" not in data:\n raise ValidationError(_(\"Missing fields 'role' and/or 'visible'.\"))", "def __call__(self, data):\n data_combiner = DataCombiner(self.instance, data)\n company = data_combiner.get_value(self.company_field)\n contact = data_combiner.get_value(self.contact_field)\n\n if contact.company != company:\n raise ValidationError({\n self.contact_field: self.message,\n })", "def update_c_mandatory_fields(request, campaign_id):\n # print(request.POST)\n campaign = Campaign.objects.get(id=campaign_id)\n form = CampaignForm(request.POST, instance = campaign)\n # print(form)\n if form.is_valid():\n form.save()\n return redirect('add_campaign_spec', id=campaign_id)\n else:\n # return redirect('clonecampaign', id=campaign_id)\n print(form.errors)\n return redirect(reverse('edit_campaign', kwargs={'campaign_id':campaign_id}))", "def validate(self) -> bool:\n required = self.crud.validate(required=True)\n if required:\n raise ValueError(\n f\"Validation error. 
Required destination fields are not present in the crosswalk: {required}\"\n )", "def get_campaign_command(client: Client, campaign_id: str) -> CommandResults | str:\n try:\n raw_response = client.get_campaign(campaign_id)\n except ValueError:\n return 'Campaign Id not found'\n\n campaign_general_fields = ['id', 'name', 'description', 'startDate', 'notable']\n campaign_fields = ['families', 'techniques', 'actors', 'brands', 'malware']\n\n outputs = {}\n outputs['campaignMembers'] = dict_safe_get(raw_response, ['campaignMembers'])\n outputs['info'] = {key: value for key, value in raw_response.items() if key in campaign_general_fields}\n outputs.update({key: value for key, value in raw_response.items() if key in campaign_fields})\n fields_readable_output = \"\"\n for field in campaign_fields:\n fields_readable_output += \"\\n\" + tableToMarkdown(field.capitalize(),\n dict_safe_get(outputs, [field]), headers=['id', 'name'],\n headerTransform=pascalToSpace\n )\n\n campaign_info_output = tableToMarkdown('Campaign Information',\n outputs['info'],\n headers=['id', 'name', 'description', 'startDate', 'notable'],\n headerTransform=pascalToSpace\n )\n campaign_members_output = tableToMarkdown('Campaign Members',\n outputs['campaignMembers'],\n headers=['id', 'threat', 'type'],\n headerTransform=pascalToSpace\n )\n\n readable_output = campaign_info_output + \"\\n\" + campaign_members_output + fields_readable_output\n\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='Proofpoint.Campaign',\n outputs=outputs,\n outputs_key_field='id',\n raw_response=raw_response\n )", "def test_get_campaign_by_id_passes(self):\n response = self.client.get(f\"{self.endpoint_url}{self.test_campaign.id}/\")\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"id\": CAMPAIGN_ID, \"name\": CAMPAIGN_NAME})", "def validate(self, data):\n # if data['is_private'] and data['contestants']:\n # raise serializers.ValidationError(\"Can not be private and compete for an award.\")\n return data", "def validate(self):\n\n form = CallEventForm(self.data)\n if not form.is_valid():\n self.errors = form.errors\n map_dict_fields(self.errors, const.DB_FIELDS, const.API_FIELDS)", "def testGetCampaign(self):\n if self.__class__.campaign1 is None:\n self.testSaveCampaign()\n self.assert_(isinstance(self.__class__.service.GetCampaign(\n self.__class__.campaign1['id']), tuple))", "def run_validation(self, data=empty):\n\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(f) for f in unknown]\n raise ValidationError({api_settings.NON_FIELD_ERRORS_KEY: errors})\n return super().run_validation(data)", "def validate(self):\n return self.validator.validate(self.fields)", "def run_validation(self, data=empty):\n\n # no idea why there is no such built in feature in DRF\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(f) for f in unknown]\n raise ValidationError({api_settings.NON_FIELD_ERRORS_KEY: errors})\n return super().run_validation(data)", "def test_view_form_valid_sales_method(self, google):\n google.return_value = GeocoderMock()\n\n form_data = super(BaseSearchPageViewTestCase, self).get_data_sales()\n\n view = super(BaseSearchPageViewTestCase, self).initialize(BaseSearchPageView(), None)\n\n view.form_class = SearchForm\n\n form = SearchForm(form_data)\n\n form.is_valid()\n\n response = view.form_valid(form)\n\n (url, 
query) = super(BaseSearchPageViewTestCase, self).parse_url(response)\n\n form_data = super(BaseSearchPageViewTestCase, self).get_data_extra(form_data)\n\n for key, value in form_data.iteritems():\n self.assertTrue(key in query and query[key] == str(value))\n\n # Check we are dealing with a redirect and path as expected as sales/search\n self.assertIsInstance(response, HttpResponseRedirect)\n self.assertEqual(url.path, '/sales/search/')", "def send_validation_request(self):\r\n self.send_request(send_function=self._assemble_and_send_validation_request)", "def test_create_new_campaign_by_admin_passes(self):\n response = self.client.post(\n self.endpoint_url,\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [self.test_org.id],\n \"url\": None,\n },\n headers={\"Authorization\": self.session_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response_body, {\"campaignId\": 2})", "def test_update_existent_campaign_by_admin_passes(self):\n response = self.client.patch(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [],\n \"url\": None,\n },\n headers={\"Authorization\": self.admin_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body[\"Success\"], \"Campaign 1 updated\")", "def test_update_existent_campaign_by_unauthenticated_user_fails(self):\n response = self.client.patch(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [self.test_org.id],\n \"url\": None,\n },\n )\n self.assertEqual(response.status_code, 401)", "def show_validator(self, show_validator):\n\n self._show_validator = show_validator", "def show_validator(self, show_validator):\n\n self._show_validator = show_validator", "def cingValidation(self): \n \n self.cingRun()\n \n self.analyseCingResults()", "def check(self, args):\n self.parent.footer.set_text(\"Checking data...\")\n self.parent.refreshScreen()\n # Get field information\n responses = dict()\n\n for index, fieldname in enumerate(self.fields):\n if fieldname != \"blank\":\n responses[fieldname] = self.edits[index].get_edit_text()\n\n password = responses[\"FUEL_ACCESS/password\"]\n confirm_password = responses.pop(\"CONFIRM_PASSWORD\")\n\n if self.parent.save_only:\n return responses\n\n # Validate each field\n errors = []\n warnings = []\n\n # Passwords must match\n if password != confirm_password and \\\n password != self.defaults['FUEL_ACCESS/password']['value']:\n errors.append(\"Passwords do not match.\")\n\n # Password must not be empty\n if len(password) == 0:\n errors.append(\"Password must not be empty.\")\n\n # Password needs to be in ASCII character set\n try:\n if password.decode('ascii'):\n pass\n except UnicodeDecodeError:\n errors.append(\"Password contains non-ASCII characters.\")\n\n # Passwords should be at least 8 symbols\n if len(password) < 8:\n warnings.append(\"8 symbols\")\n\n # Passwords should contain at least one digit\n if re.search(r\"\\d\", password) is None:\n warnings.append(\"one digit\")\n\n if re.search(r\"[A-Z]\", password) is None:\n warnings.append(\"one uppercase letter\")\n\n if re.search(r\"[a-z]\", password) is None:\n warnings.append(\"one lowercase letter\")\n\n if re.search(r\"[!#$%&'()*+,-@./[\\\\\\]^_`{|}~\" + r'\"]', password) \\\n is None:\n warnings.append(\"one special character\")\n\n if 
len(errors) > 0:\n log.error(\"Errors: %s %s\" % (len(errors), errors))\n modulehelper.ModuleHelper.display_failed_check_dialog(self, errors)\n return False\n\n if len(warnings) > 0:\n self.parent.footer.set_text(\"Warning: Password should have \"\n \"at least %s.\" % (warnings[0]))\n else:\n self.parent.footer.set_text(\"No errors found.\")\n\n return responses", "def validator(self, *args, **kwargs):\n if 'framework_slug' not in kwargs:\n current_app.logger.error(\"Required parameter `framework_slug` is undefined for the calling view.\")\n abort(500, \"There was a problem accessing this page of your application. Please try again later.\")\n\n if current_user.is_authenticated and current_user.supplier_id:\n supplier_framework = self.data_api_client.get_supplier_framework_info(\n current_user.supplier_id, kwargs['framework_slug']\n )['frameworkInterest']\n\n if supplier_framework['applicationCompanyDetailsConfirmed'] is not True:\n return abort(400, \"You cannot access this part of your application until you have confirmed your \"\n \"company details.\")\n\n return True", "def validate_get_openings_result(self, result):\n\n\n requiredFields = {'company', 'title', 'url', 'locations'}\n #optionalFields = {'department', 'description'}\n\n for r in result:\n if not requiredFields.issubset(set(result.keys())):\n return False\n\n return True" ]
[ "0.5706519", "0.5386401", "0.5315525", "0.530953", "0.51058286", "0.51058286", "0.48647565", "0.4818601", "0.47097164", "0.46946898", "0.46497676", "0.4634922", "0.46242386", "0.45637044", "0.449959", "0.44929436", "0.44728902", "0.44613764", "0.44438702", "0.4442745", "0.44330058", "0.4426822", "0.4408353", "0.43987724", "0.43857944", "0.43857944", "0.43846133", "0.43533403", "0.4345847", "0.43383792" ]
0.62158144
0
This is the base Exception class for all step failures. It can be manually raised from recipe code to cause the build to turn red.
def StepFailure(self): return recipe_api.StepFailure
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def raise_step_error(self, error: Exception, step: str):\n error_message = \"{}\\nFailed: Error={}\".format(step, str(error))\n logging.error(error_message)\n self.slacker.send_thread_reply(error_message)\n raise Exception(error_message)", "def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)", "def test_fails(self):\n raise FoolishError(\"I am a broken test\")", "def fail(self, msg=None):\r\n raise self.failureException(msg)", "def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)", "def raise_for_failure(self) -> None:\n if not self.is_success():\n raise exc.ExecutionError(self)", "def exception(self, *args, **kwargs):\n return super(Blueprint, self).exception(*args, **kwargs)", "def abort(self, message: str) -> None:\n message = f\"{Invocation.current.log} - {message}\"\n self.exception = StepException(message)\n global failure_aborts_build # pylint: disable=invalid-name\n global no_actions # pylint: disable=invalid-name\n if failure_aborts_build.value and not no_actions.value:\n no_additional_complaints()\n raise self.exception", "def setup_class(cls):\n try:\n super(BuildFailureTests, cls).setup_class()\n except CommandFailure:\n pass\n else:\n raise AssertionError('A failed build returned an exit code of 0.')", "def fail(self, msg=None):\n raise Exception, msg", "def failed(self):\n\t\tpass", "def raise_fail(*args, **kwargs):\n raise Exception(\"oops\")", "def test_second_step_strict(self):\n with self.assertRaises(Exception):\n self.run_step('S02-errors.py', allow_failure=False)", "def testRunException(self):\n class TestError(Exception):\n \"\"\"Unique test exception\"\"\"\n\n perform_mock = self.PatchObject(generic_stages.BuilderStage, 'PerformStage')\n perform_mock.side_effect = TestError('fail!')\n\n stage = self.ConstructStage()\n results_lib.Results.Clear()\n self.assertRaises(failures_lib.StepFailure, self._RunCapture, stage)\n\n results = results_lib.Results.Get()[0]\n self.assertTrue(isinstance(results.result, TestError))\n self.assertEqual(str(results.result), 'fail!')\n self.mock_cidb.StartBuildStage.assert_called_once_with(\n DEFAULT_BUILD_STAGE_ID)\n self.mock_cidb.FinishBuildStage.assert_called_once_with(\n DEFAULT_BUILD_STAGE_ID,\n constants.BUILDER_STATUS_FAILED)", "def failure_callback(self):\n error_filename = self.run_dir / \"eplusout.err\"\n if error_filename.exists():\n with open(error_filename, \"r\") as stderr:\n stderr_r = stderr.read()\n self.exception = EnergyPlusProcessError(\n cmd=self.cmd, stderr=stderr_r, idf=self.idf\n )\n self.cleanup_callback()", "def add_failure(self, task: Task, exception: Any) -> None: # noqa: DAR101\n super().add_failure(task, exception)\n self._add_summary(task, _TaskExitCode.FAIL)", "def test_badstageerror_raise(self, mock_isdir):\n # Set the mocked functions returned values\n mock_isdir.side_effect = [True]\n\n # Test execution\n wrong_kwargs = copy.copy(self.kwargs)\n wrong_kwargs[\"reconstruction_stage\"] = \"WRONG\"\n self.assertRaises(ValueError, recon_all, **wrong_kwargs)", "def fail(msg):\n\n # Not sure if simply raising the exception is clearer.\n raise CommandFailed(msg)", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def InfraFailure(self):\n return recipe_api.InfraFailure", "def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)", "def test_class_errored(self, 
cls, exception):", "def report_unexpected_exception(self, *args, **kwargs):\n pass", "def test_runFailed(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c', 'print \"hi\"; raise SystemExit(1)'])\n self.assertEquals(exc.exitStatus, 1)\n self.assertEquals(exc.exitSignal, None)\n self.assertEquals(exc.output, \"hi\\n\")", "def failure_exception(cls, state, exception):\r\n return PlatformMessage(method=\"__reply__\", kwargs={\"__result__\": \"fail\", \"state\": state, \"errcode\": -2,\r\n \"e\": exception})", "def test_fail(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_failed, use_instances=True)\n run_info = TrialInfo(config=2, instance=\"test\", seed=0, budget=0.0)\n\n runner.submit_trial(run_info)\n run_info, run_value = next(runner.iter_results())\n\n # Make sure the traceback message is included\n assert \"traceback\" in run_value.additional_info\n assert \"RuntimeError\" in run_value.additional_info[\"traceback\"]", "def test_config_step_raises(self):\n\n run_step = self.ConfigStep.create({\n 'name': 'run_step',\n 'job_type': 'run_odoo',\n })\n\n create_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n })\n\n config = self.Config.create({'name': 'test_config'})\n\n # test that the run_odoo step has to be the last one\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 10, 'step_id': run_step.id}),\n (0, 0, {'sequence': 15, 'step_id': create_step.id}),\n ]\n })\n\n # test that the run_odoo step should be preceded by an install step\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 15, 'step_id': run_step.id}),\n (0, 0, {'sequence': 10, 'step_id': create_step.id}),\n ]\n })", "def failure(self, input: str) -> enumFail:\n pass", "def test_raise_exception(self):\n with self.assertRaises(Exception):\n SshpassBaseCommandBuilder(COMMAND).to_build()", "def indicate_failure(self):\n pass" ]
[ "0.65778327", "0.6160697", "0.6118424", "0.6045226", "0.6028757", "0.59925586", "0.59890693", "0.5984237", "0.5979543", "0.5880542", "0.58220655", "0.58042306", "0.58023226", "0.57811123", "0.57735544", "0.56904095", "0.56897426", "0.5686951", "0.56869185", "0.5652853", "0.5650791", "0.5649762", "0.56389666", "0.5612642", "0.5602301", "0.55968726", "0.5593547", "0.5591498", "0.55746824", "0.5562834" ]
0.6969654
0
StepWarning is a subclass of StepFailure, and will translate to a yellow build.
def StepWarning(self): return recipe_api.StepWarning
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _warn(self, warning=None):\r\n debug.err('Warning: %s' % warning)\r\n\r\n if core.FW_conf['settings'].TestRun.ExecutionMode == 'Leader' and warning != None:\r\n executeInFollower(\"self.warn('%s')\" % (warning,))\r\n\r\n if type(warning) != types.ListType:\r\n warning = [warning]\r\n\r\n self.result.addStepWarning(warning)", "def warning(self, warning):\n pass", "def StepFailure(self):\n return recipe_api.StepFailure", "def warning(self, msg, *args, **kwargs):\n pass", "def warning(self, *args, **kwargs):", "def test_warning(self):\n self.p.compute_termination_criteria = True\n self.set_parameter_and_step(\"max_iter\", True, 5, \"ignore\")", "def warning ( self , message , *args , **kwargs ) :\n return self.logger.warning ( message , *args , **kwargs )", "def warning(msg):\n click.secho(msg, fg='yellow')", "def warning(self, *args, **kwargs):\n self.msg(logging.WARNING, *args, **kwargs)", "def success_failure_color(self, evaluation):\n return \"#60f979\" if evaluation.passes else \"#f96c60\"", "def warning(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.WARNING))", "def notice(self, warning):\n pass", "def warning(self, message):\n return self.log(\"WARNING\", message)", "def fail(self, message):\n logger.warning(message)\n g.failed = True", "def warning(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['warning']:\n self.print_lines(self.colored(('magenta', 'bold'), lines))", "def warning(self) -> str:\n return pulumi.get(self, \"warning\")", "def failure(self, message=''):\n print(colored(message, 'red'))", "def set_warning_message(msg):\n set_message(msg, TYPE_WARNING)", "def warning(self) -> Optional[pulumi.Input['AnyArgs']]:\n return pulumi.get(self, \"warning\")", "def warning(self, message, *, preprocessor=None):\n console.warning(message)", "async def warning(self, check, *, note=None):\n return await self.mark(check, \"warning\", note=note)", "def warning(self, msg):\r\n self.logger.warning(msg)", "def warning(self, msg, *args):\n if self.lvl<=logging.WARNING: return self._log(msg, *args)", "def WARNING(self, _strMessage=\"\"):\n self.edLogging.WARNING(_strMessage)", "def warning(self, _strMessage=\"\"):\n self.edLogging.warning(_strMessage)", "def warning(self, msg):\n\n self.logger.warning(msg)", "def report_step_progress(self, step):\n dot_status = self.dot_status[step.status.name]\n if step.status == Status.failed:\n if (step.exception and\n not isinstance(step.exception, AssertionError)):\n # -- ISA-ERROR: Some Exception\n dot_status = self.dot_status[\"error\"]\n step.feature = self.current_feature\n step.scenario = self.current_scenario\n self.failures.append(step)\n self.stream.write(dot_status)\n self.stream.flush()", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass" ]
[ "0.680333", "0.6467354", "0.6363371", "0.6182617", "0.6118767", "0.60774654", "0.6036392", "0.6019372", "0.6014528", "0.6010554", "0.5999503", "0.5906176", "0.5892281", "0.5887475", "0.5859568", "0.5821751", "0.5817227", "0.5793164", "0.5664179", "0.5653461", "0.5625123", "0.56036913", "0.55827785", "0.55620575", "0.55563116", "0.5553998", "0.5553989", "0.55525285", "0.55525285", "0.55525285" ]
0.792026
0
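A minimal sketch of how recipe code might use the property above, assuming it is exposed as `api.step.StepWarning` and that the exception accepts a reason string (both assumptions; the step-call signature `api.step(name, cmd_list)` mirrors the call style seen in the nest generator further below):

    def RunSteps(api):
        try:
            api.step('lint', ['./run_lint.sh'])
        except api.step.StepFailure:
            # Downgrade the red failure to a yellow warning for this build.
            raise api.step.StepWarning('lint reported style issues')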
InfraFailure is a subclass of StepFailure, and will translate to a purple build. This exception is raised from steps which are marked as `infra_step`s when they fail.
def InfraFailure(self): return recipe_api.InfraFailure
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _identify_fail(failure):\n logger.warning(failure.getErrorMessage())\n logger.warning(\"Failed to setup & obtain identity\")\n return", "def failure_exception(cls, state, exception):\r\n return PlatformMessage(method=\"__reply__\", kwargs={\"__result__\": \"fail\", \"state\": state, \"errcode\": -2,\r\n \"e\": exception})", "def infrastructure_failure(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"infrastructure_failure\")", "def infrastructure_failure(self) -> bool:\n return pulumi.get(self, \"infrastructure_failure\")", "def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)", "def failure(self, cb: CircuitBreaker, exc: BaseException) -> None:", "def StepFailure(self):\n return recipe_api.StepFailure", "def handle_deploy_failure(self):\n step = \"Recovering From Deployment Error\"\n self.slacker.send_thread_reply(step)\n\n if self.has_down_time is True and self.migration_completed:\n return \"Skipped Automated Recovery: Requires Manual Intervention\"\n\n try:\n self.rollback_images()\n self.scale_up_deployments()\n error_handler_message = \"Successfully Rolled Back Deployment\"\n\n except Exception as e:\n error_handler_message = str(e)\n logging.error(error_handler_message)\n\n return error_handler_message", "def report_recoverable(self, payload, exception, callback_url):\n capture_exception(exception)\n\n if not callback_url:\n return\n\n payload[\"failure\"] = {\"type\": \"RECOVERABLE_FAILURE\", \"message\": str(exception)}\n\n data = GraphExportCallbackError().load(payload)\n with retry() as session:\n session.post(callback_url, data=data)", "def handle_unrecoverable_failure(self, node):\n if node.is_failed and node.exit_status < 400:\n self.report_error_handled(node, 'unrecoverable error, aborting...')\n return ProcessHandlerReport(True, self.exit_codes.ERROR_UNRECOVERABLE_FAILURE)", "def on_saga_failure(self, failed_step: BaseStep, initial_failure_payload: dict):\n logger.info(f'Saga {self.saga_id} failed on \"{failed_step.name}\" step. 
\\n'\n f'Failure details: {initial_failure_payload}')", "def failure_callback(self):\n error_filename = self.run_dir / \"eplusout.err\"\n if error_filename.exists():\n with open(error_filename, \"r\") as stderr:\n stderr_r = stderr.read()\n self.exception = EnergyPlusProcessError(\n cmd=self.cmd, stderr=stderr_r, idf=self.idf\n )\n self.cleanup_callback()", "def failure(self) -> 'outputs.EndConditionResponse':\n return pulumi.get(self, \"failure\")", "def _fail(self, exception):\n self.monitor_loop.stop()\n self._maintained.errback(exception)", "def failure(self, input: str) -> enumFail:\n pass", "def failure(cls, state, errcode=-1):\r\n return PlatformMessage(method=\"__reply__\", kwargs={\"__result__\": \"fail\", \"state\": state, \"errcode\": errcode})", "def logFailure(failure, msg='Unhandled exception in deferred:'):\n logging.error('%s\\n%s', msg, failure.getTraceback())", "def test_import_infra(self):\n project = Project.create()\n # Read an engine and check\n infra = import_infra(\"A320.xml\", \"engine\")\n self.assertEqual(len(infra.engines), 1)\n engine = infra.engines[0]\n self.assertEqual(engine.name, \"Machine 0\")\n self.assertEqual(engine.hauteur, 0.0)\n # Local frame:\n self.assertEqual(engine.position.x, 0.0)\n self.assertEqual(engine.position.y, 0.0)\n self.assertEqual(engine.position.z, 0.0)\n\n # Read a building and check\n infra = import_infra(\"Building.xml\", \"building\")\n self.assertEqual(len(infra.buildings), 1)\n building = infra.buildings[0]\n self.assertEqual(building.name, \"MyBuilding\")\n self.assertEqual(building.hauteur, 0.0)\n # Local frame:\n self.assertEqual(building.position.x, 0.0)\n self.assertEqual(building.position.y, 0.0)\n self.assertEqual(building.position.z, 0.0)\n\n # Check a no radiant building is refused:\n try:\n infra = import_infra(\"Building_no_radiant.xml\", \"building\")\n except:\n print(\"Ok, non radiant building is refused as expected.\")\n else:\n print(\"Non radiant building should be refused.\")\n sys.exit(-1)", "def add_failure(self, task: Task, exception: Any) -> None: # noqa: DAR101\n super().add_failure(task, exception)\n self._add_summary(task, _TaskExitCode.FAIL)", "def indicate_failure(self):\n pass", "def inject_failure(self):\n # Inject a failure only if there's a process running\n self.BqLog(\"Starting failure injection\")\n while len(self.circQ) > 0 or (self.currentProc and self.currentProc.workLeft > 0):\n t = time_to_failure()\n self.BqLog(\"Inject the next failure after %d seconds\" % (t))\n if t == 0:\n continue\n yield self.env.timeout(t)\n if len(self.circQ) >= 0 and \\\n self.currentProc.workLeft > 0:\n # Only break the machine if it is currently computing,\n # and if current proc is not restarting\n self.BqLog(\"Injecting a failure in %s\" % (self.currentProc.name))\n self.numFailures += 1\n self.process.interrupt(cause=\"failure\")", "def _logError(self, failure):\r\n try:\r\n failure.printTraceback()\r\n except:\r\n print('Could not print traceback of failure, print error '\r\n 'message instead:')\r\n print(failure.getErrorMessage())", "def failure(self):\n self.logger.debug(\"Logging failure for %s\", self.key)\n self.failures = self.driver.failure(self.key)", "def run(self, failure_info):\n signals = {}\n if not failure_info['failed'] or not failure_info['chromium_revision']:\n # Bail out if no failed step or no chromium revision.\n return signals\n\n # Bail out on infra failure\n if failure_info.get('failure_type') == failure_type.INFRA:\n return signals\n\n master_name = failure_info['master_name']\n 
builder_name = failure_info['builder_name']\n build_number = failure_info['build_number']\n\n for step_name in failure_info.get('failed_steps', []):\n if not waterfall_config.StepIsSupportedForMaster(step_name, master_name):\n # Bail out if the step is not supported.\n continue\n\n step = WfStep.Get(master_name, builder_name, build_number, step_name)\n if step and step.log_data:\n failure_log = step.log_data\n else:\n # TODO: do test-level analysis instead of step-level.\n # TODO: Use swarming test result instead of archived gtest results\n gtest_result = buildbot.GetGtestResultLog(\n master_name, builder_name, build_number, step_name)\n if gtest_result:\n failure_log = _GetReliableTestFailureLog(gtest_result)\n\n if gtest_result is None or failure_log == 'invalid':\n if not lock_util.WaitUntilDownloadAllowed(\n master_name): # pragma: no cover\n raise pipeline.Retry('Failed to pull log of step %s of master %s'\n % (step_name, master_name))\n try:\n failure_log = buildbot.GetStepLog(\n master_name, builder_name, build_number, step_name,\n self.HTTP_CLIENT)\n except ResponseTooLargeError: # pragma: no cover.\n logging.exception(\n 'Log of step \"%s\" is too large for urlfetch.', step_name)\n # If the stdio log of a step is too large, we don't want to pull it\n # again in next run, because that might lead to DDoS to the master.\n # TODO: Use archived stdio logs in Google Storage instead.\n failure_log = 'Stdio log is too large for urlfetch.'\n\n if not failure_log: # pragma: no cover\n raise pipeline.Retry('Failed to pull stdio of step %s of master %s'\n % (step_name, master_name))\n\n # Save step log in datastore and avoid downloading again during retry.\n if not step: # pragma: no cover\n step = WfStep.Create(\n master_name, builder_name, build_number, step_name)\n\n step.log_data = _ExtractStorablePortionOfLog(failure_log)\n\n try:\n step.put()\n except Exception as e: # pragma: no cover\n # Sometimes, the step log is too large to save in datastore.\n logging.exception(e)\n\n # TODO: save result in datastore?\n if step.isolated:\n try:\n json_failure_log = (\n json.loads(failure_log) if failure_log != 'flaky' else {})\n except ValueError: # pragma: no cover\n json_failure_log = {}\n logging.warning('failure_log %s is not valid JSON.' 
% failure_log)\n\n signals[step_name] = {\n 'tests': {}\n }\n step_signal = FailureSignal()\n\n for test_name, test_failure_log in json_failure_log.iteritems():\n signals[step_name]['tests'][test_name] = extractors.ExtractSignal(\n master_name, builder_name, step_name, test_name,\n base64.b64decode(test_failure_log)).ToDict()\n\n # Save signals in test failure log to step level.\n step_signal.MergeFrom(signals[step_name]['tests'][test_name])\n\n signals[step_name]['files'] = step_signal.files\n signals[step_name]['keywords'] = step_signal.keywords\n else:\n signals[step_name] = extractors.ExtractSignal(\n master_name, builder_name, step_name, None, failure_log).ToDict()\n\n return signals", "def error_handler(self, failure):\n log.error(failure)", "def on_failure(self, exc: BaseException) -> NoReturn:\n throw_new_error = self._breaker.open()\n\n if throw_new_error:\n error_msg = \"Trial call failed, circuit breaker opened\"\n raise CircuitBreakerError(error_msg).with_traceback(sys.exc_info()[2])\n else:\n raise exc", "def fail(self, msg=None):\r\n raise self.failureException(msg)", "def on_compensation_failure(self, initially_failed_step: BaseStep,\n initial_failure_payload: dict,\n compensation_failed_step: BaseStep,\n compensation_exception: BaseException):\n logger.info(f'Saga {self.saga_id} failed while compensating \"{compensation_failed_step.name}\" step.\\n'\n f'Error details: {format_exception_as_python_does(compensation_exception)} \\n \\n'\n f'Initial failure details: {initial_failure_payload}')", "def failure_detail(self) -> 'outputs.FailureDetailResponse':\n return pulumi.get(self, \"failure_detail\")", "def _authenticate_failed(self, e):\r\n if e.check(InvalidRequest):\r\n code = httpstatus.HTTP_STATUS_CODE_BAD_REQUEST[0]\r\n msg = e.getErrorMessage()\r\n elif e.check(UnauthorizedLogin):\r\n code = httpstatus.HTTP_STATUS_CODE_UNAUTHORIZED[0]\r\n msg = httpstatus.HTTP_STATUS_CODE_UNAUTHORIZED[1]\r\n else:\r\n e.printTraceback()\r\n code = httpstatus.HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0]\r\n msg = httpstatus.HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[1]\r\n\r\n return Failure(HttpException(code, msg))" ]
[ "0.5803383", "0.5730745", "0.565321", "0.5649556", "0.56122845", "0.5440349", "0.5320383", "0.52736396", "0.5240046", "0.5213798", "0.5201333", "0.5083693", "0.50791305", "0.50574607", "0.5027117", "0.50205135", "0.5006612", "0.5000682", "0.49848235", "0.49722403", "0.49625576", "0.49599424", "0.4942724", "0.49408588", "0.49185506", "0.488899", "0.4879341", "0.48707384", "0.48572305", "0.4842845" ]
0.6850455
0
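A hedged sketch of the infra-step pattern the description implies; the attribute path `api.step.InfraFailure` and the exact `infra_step=True` keyword spelling are assumptions:

    def RunSteps(api):
        try:
            # Hypothetical kwarg spelling: mark the step as infrastructure work.
            api.step('sync deps', ['gclient', 'sync'], infra_step=True)
        except api.step.InfraFailure:
            # An infra problem turns the build purple; optionally report it,
            # then let the exception keep propagating.
            api.step('notify troopers', ['./notify_troopers.sh'])
            raise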
StepTimeout is a subclass of StepFailure and is raised when a step times out.
def StepTimeout(self): return recipe_api.StepTimeout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def raise_timeout(self, *args, **kwargs):\n\n self.log.error(\"Task timeout encountered.\")\n raise TimeoutError", "def handler(*args, **kwargs):\n raise TimeoutException(\"Test aborted due to timeout. Test was \" +\n \"expected to finish in less than {} second(s).\".format(time_limit))", "def StepFailure(self):\n return recipe_api.StepFailure", "async def timeout(self, failed: bool = False) -> None:\n raise NotImplementedError()", "def test_timeout_elapsed_exception(self):\n deadline = Deadline(-MS)\n with self.assertRaises(TimeoutError):\n deadline.timeout()", "def assert_timeout(self) -> None:", "def timeout(order):\n return ResultProxy(TaskResult(TaskTimedout('A task has timedout'), order))", "def _timeout(signum, frame):\n # Raise TimeoutException with system default timeout message\n raise TimeoutException()", "def test_timeoutRaises(self):\n\n @self.eventloop.wait_for(timeout=0.5)\n def times_out():\n return Deferred().addErrback(lambda f: f.trap(CancelledError))\n\n start = time.time()\n self.assertRaises(TimeoutError, times_out)\n self.assertTrue(abs(time.time() - start - 0.5) < 0.1)", "def _check_timeouts(self, chunk_timeout, total_timeout):\n cur_time = time()\n\n if chunk_timeout is not None and cur_time > self._chunk_time + chunk_timeout:\n raise ChunkTimeout('Item timeout expired.')\n elif total_timeout is not None and cur_time > self._total_time + total_timeout:\n raise TotalTimeout('Total timeout expired.')", "def test_timeout(self, mocker, mock_timedelta):\n\n tid = 289466\n site = \"mysite\"\n\n exception_response = self.generate_task_dictionary(\n tid, state=\"started\", completed=None\n )\n\n responses = [{\"json\": exception_response}]\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, responses)\n\n with self.assertRaises(exceptions.AcquiaCloudTimeoutError):\n self.client.site(site).task(tid).wait(0)", "def _handle_timeout(self, frame=None, **_):\n\n raise TimeOut.TimeOutError(self, frame)", "def test_failed_processing(self):\n # setup\n ledger_api_dialogue, fipa_dialogue = self._setup_fipa_ledger_api_dialogues(self)\n\n self.transaction_behaviour.timedout.add(ledger_api_dialogue.dialogue_label)\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.transaction_behaviour.failed_processing(ledger_api_dialogue)\n\n # after\n self.assert_quantity_in_outbox(0)\n\n # finish_processing\n assert self.transaction_behaviour.timedout == set()\n\n mock_logger.assert_any_call(\n logging.DEBUG,\n f\"Timeout dialogue in transaction processing: {ledger_api_dialogue}\",\n )\n\n # failed_processing\n assert fipa_dialogue in self.transaction_behaviour.waiting", "def assert_timeout(self) -> None:\n if self._cancelled:\n raise asyncio.TimeoutError from None", "def onTimeStepEnd(self, timeStep):\n pass", "def test_step_stop_aborted(self, _step: PropertyMock):\n _step.return_value = None\n es = exposed.ExposedStep()\n es.stop()", "def testTimeout(self):\n\n class TimeoutTestCase(cros_test_lib.TestCase):\n \"\"\"Test case that raises a TimeoutError because it takes too long.\"\"\"\n\n TEST_CASE_TIMEOUT = 1\n\n def testSleeping(self):\n \"\"\"Sleep for 2 minutes. 
This should raise a TimeoutError.\"\"\"\n time.sleep(2 * 60)\n raise AssertionError('Test case should have timed out.')\n\n # Run the test case, verifying it raises a TimeoutError.\n test = TimeoutTestCase(methodName='testSleeping')\n self.assertRaises(timeout_util.TimeoutError, test.testSleeping)", "def test_set_timeout_value_error(self, timeout):\n self.assertRaises(ValueError, self.root.set_timeout, timeout)", "def check_timeout(self, msg):\n if msg.clock.secs > self.timeout and not self.is_cancelled:\n rospy.loginfo(\"Test timed out, cancelling job\")\n self.utils.set_tag(name=self.test_name + \"_Status\", value=\"Failed\")\n self.utils.set_tag(name=self.test_name + \"_Timed_Out\", value=str(self.timeout))\n self.utils.cancel_job()", "def test_timeout(self):\n start = time.time()\n dr = EventualResult(Deferred(), None)\n self.assertRaises(TimeoutError, dr.wait, timeout=0.03)\n # be a little lenient for slow computers:\n self.assertTrue(abs(time.time() - start) < 0.05)", "def pytest_timeout_cancel_timer(item):", "def test_wait_for_predicate_timeout(self):\n predicate_mock = mock.MagicMock(side_effect=[True, True, True])\n with self.assertRaises(TimeoutError):\n train_utils.wait_for_predicate(predicate_mock, num_retries=3)", "def _test_run_with_long_error_msg(self, task_class):\r\n task_entry = self._create_input_entry()\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n expected_message = \"x\" * 1500\r\n with self.assertRaises(TestTaskFailure):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\r\n # compare with entry in table:\r\n entry = InstructorTask.objects.get(id=task_entry.id)\r\n self.assertEquals(entry.task_state, FAILURE)\r\n self.assertGreater(1023, len(entry.task_output))\r\n output = json.loads(entry.task_output)\r\n self.assertEquals(output['exception'], 'TestTaskFailure')\r\n self.assertEquals(output['message'], expected_message[:len(output['message']) - 3] + \"...\")\r\n self.assertTrue('traceback' not in output)", "def __step_waiter(self, step_id):\n\n # don't forget to tip the waiter :)\n step_waiter = self.emr_client.get_waiter('step_complete')\n try:\n step_waiter.wait(ClusterId=self.clusID,\n StepId=step_id[0],\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 480\n })\n\n except WaiterError as e:\n if 'Max attempts exceeded' in e.message:\n print('EMR Step did not complete in two hours')\n else:\n print(e.message)", "def test_task_failed(self):\n\n task1 = FailedTask(mock.Mock(), total_retries=0)\n task2 = mock.Mock(execute_after=0)\n\n g = TaskDependencyGraph(MockWorkflowContext())\n seq = g.sequence()\n seq.add(task1, task2)\n\n with limited_sleep_mock():\n self.assertRaisesRegex(WorkflowFailed, 'failtask', g.execute)\n self.assertTrue(task1.is_terminated)\n self.assertFalse(task2.apply_async.called)", "def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)", "def pytest_exception_interact(node):\n hooks = node.config.pluginmanager.hook\n hooks.pytest_timeout_cancel_timer(item=node)", "def test_timeout_retries(self):\n\n batch = Batch(Mock())\n self.check_instance(batch=batch)\n\n self.assertEqual(batch.timeout_retries, 0)\n self.check_instance(batch, timeout_retries=0)\n\n batch.timeout_retries = 10\n self.assertEqual(batch.timeout_retries, 10)\n self.check_instance(batch, timeout_retries=10)\n\n batch.timeout_retries = 0\n self.assertEqual(batch.timeout_retries, 0)\n self.check_instance(batch, timeout_retries=0)\n\n batch.timeout_retries = 1\n 
self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n\n # exceptions\n ## error messages\n value_error = \"'timeout_retries' must be positive, i.e. greater or equal that zero (>=0).\"\n type_error = f\"'timeout_retries' must be of type {int}.\"\n\n #######################################################################\n # test wrong value\n with self.assertRaises(ValueError) as error:\n batch.timeout_retries = -1\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, value_error)\n\n #######################################################################\n # test wrong type\n with self.assertRaises(TypeError) as error:\n batch.timeout_retries = True\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, type_error)\n\n with self.assertRaises(TypeError) as error:\n batch.timeout_retries = '2'\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, type_error)", "def fail(self):\n self.cleanup()\n self.runner.report_job_fail(self.id)", "def test_timeout_elapsed_no_exception(self):\n deadline = Deadline(-MS)\n timeout = deadline.timeout(raise_if_elapsed=False)\n self.assertGreater(timeout, -2 * MS)\n self.assertLess(timeout, -MS)" ]
[ "0.6783338", "0.6670357", "0.6458349", "0.6233549", "0.620531", "0.6151229", "0.61305606", "0.6103031", "0.59383696", "0.5915559", "0.5886344", "0.5850895", "0.58435684", "0.58397263", "0.5815907", "0.5812256", "0.576869", "0.5734896", "0.5714007", "0.56782407", "0.56605077", "0.5657417", "0.5595983", "0.5576143", "0.55475986", "0.5510916", "0.5483291", "0.5479062", "0.5474232", "0.5470697" ]
0.7499932
0
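A sketch of bounding a step's runtime; the `timeout=` keyword and the `api.step.StepTimeout` attribute path are assumptions, since the description only states that the exception is raised when a step times out:

    def RunSteps(api):
        try:
            # Hypothetical kwarg: give the step a five-minute budget.
            api.step('integration tests', ['./run_integration.sh'], timeout=5 * 60)
        except api.step.StepTimeout:
            # StepTimeout derives from StepFailure, so re-raising with more
            # context still fails the build, just with a clearer reason.
            raise api.step.StepFailure('integration tests exceeded the 5-minute budget')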
The currently active (open) result from the last step that was run. This is a `types.StepData` object.
def active_result(self): return self.step_client.previous_step_result()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def result(self):\n # most pythonic way to get last in last is -1\n return self.history[-1]", "def current_progress_data(self):\n return self._current_progress_data", "def previous_step_result(self):\n return self._previous_step_result", "def cur_step(self):\n return self._cur_step", "def previous_step_result(self):\n if not self._engine._step_stack:\n raise ValueError(\n 'No steps have been run yet, and you are asking for a previous step '\n 'result.')\n return self._engine._step_stack[-1].step_result", "def get_last_result(self):\n return self.last_result", "def getCurrentStep():", "def last_triggered_step(self):\n return self._last_triggered_step", "def result(self):\n assert(self.__complete)\n return self.__result", "def get_current_observation(self):\n return self.observation_history[-1]", "def result( self):\n return self._result", "def result(self):\n return self['result']", "def get_last_solution(self):\n return self.last_result", "def get_current_value(self):\n assert(self.is_started())\n return self.currValue", "def currentValue(self):\n return self.__currentValue", "def result(self):\n return self._result", "def result(self):\n return self._result", "def result(self):\n return self._result", "def result(self):\n with self._condition:\n self.fetch()\n return self.__get_result()", "def last_result(self):\n # TODO : when evaluating multiline expressions this returns the first result\n lr = self.jiloop.lastRequest()\n res = lr.lineRep().call(\"$result\", spark_jvm_helpers.to_scala_list([]))\n return res", "def _get_result(self):\r\n \r\n return self._result", "def current(self):\n return self._wizard.current_step or self.first", "def get_data(self):\n return self._result", "def state(self):\n result = self.getResult()\n return result.state", "def last_value(self):\n return self._stop", "def step(self):\n return self._step", "def extract_goal_state(self):\n time = rospy.get_time()\n ref_time = time - self.last_time\n future_time = ref_time + self.update_rate\n\n # get state of future time in global trajectory\n return df.compute_output3D(self.global_solution, self.order, self.time[self.future_index], future_time)", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step" ]
[ "0.7012832", "0.6931432", "0.6929647", "0.67603475", "0.66973376", "0.6685482", "0.66834754", "0.66508675", "0.66325194", "0.65645987", "0.6562965", "0.65574545", "0.65385914", "0.6496028", "0.6484237", "0.6467374", "0.6467374", "0.6467374", "0.64124614", "0.6401742", "0.63910645", "0.63884705", "0.6376051", "0.63594383", "0.63495255", "0.6305278", "0.6294111", "0.6282416", "0.6282416", "0.6282416" ]
0.78945726
0
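A sketch assuming the property above is reachable as `api.step.active_result`; the `presentation.step_text` attribute on the returned StepData is also an assumption:

    def RunSteps(api):
        try:
            api.step('compile', ['ninja', '-C', 'out/Release'])
        except api.step.StepFailure:
            # The failed step's data is still reachable through the property above.
            failed = api.step.active_result  # a types.StepData object
            # Hypothetical attribute: adjust how the open step is presented.
            failed.presentation.step_text = 'compile failed; see stdout'
            raise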
Nest allows you to nest steps hierarchically on the build UI. Calling ```python
def nest(self, name):
    step_result = self(name, [])
    with self.m.context(name_prefix=name, increment_nest_level=True):
        yield step_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_step(self):\n pass", "def build_step(self):\n pass", "def test_build_page_nested(build_resources, cli):\n books, _ = build_resources\n src = books.joinpath(\"nested\")\n page = src.joinpath(\"contents\", \"markdown.md\")\n html = src.joinpath(\"_build\", \"_page\", \"contents-markdown\", \"html\")\n index = html.joinpath(\"index.html\")\n result = cli.invoke(commands.build, [page.as_posix(), \"-n\", \"-W\", \"--keep-going\"])\n assert result.exit_code == 0, result.output\n assert html.joinpath(\"markdown.html\").exists()\n assert not html.joinpath(\"extra_page.html\").exists()\n assert 'url=markdown.html\" />' in index.read_text(encoding=\"utf8\")", "def build_step(self):\n\n pass", "def test_run_tempest(self, tempest_actions, show_step, _):\n show_step(1)\n tempest_actions.prepare_and_run_tempest()", "def stepStarted(build, step):", "def build_nested_blocks(self):\n pass", "def less_nested_example_vanilla():\n return", "def build(ctx: typer.Context):\n from .tasks import build, main\n\n sys.argv = sys.argv[:1] + (ctx.args or [\"list\"])\n main(vars(build))", "def build(parameters):\n\n\n print(\"In Build module\")", "def actionBuild():\n\n #Init builder logger\n Builder.init()\n\n for target in Settings.targets:\n targetsToBuild, combineLibs, copyToOutput = Builder.getTargetGnPath(target)\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n if System.checkIfCPUIsSupportedForPlatform(cpu,platform):\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_PREPARE, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Build ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n result = Builder.run(target, targetsToBuild, platform, cpu, configuration, combineLibs, copyToOutput)\n Summary.addSummary(ACTION_BUILD, target, platform, cpu, configuration, result, Builder.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed building ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.RED)\n #Terminate script execution if stopExecutionOnError is set to True in userdef\n shouldEndOnError(result)\n else:\n Logger.printEndActionMessage('Build ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration)\n else:\n Logger.printColorMessage('Build cannot run because preparation has failed for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n Logger.printEndActionMessage('Build not run for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)", "def build_root(event):\n mode = event.parameters['mode']\n if mode == 'from config':\n config = event.parameters['config']\n\n elif mode == 'from template':\n manager = event.workbench.get_plugin('exopy.tasks')\n view = TemplateSelector(event.parameters.get('widget'),\n manager=manager)\n result = view.exec_()\n if result:\n path = view.path\n config, _ = load_template(path)\n\n else:\n msg = 'Invalid mode (%s) for build_root. 
Valid ones are : %s'\n raise ValueError(msg % (mode, ('from config', 'from template')))\n\n if config:\n build_dep = event.parameters.get('build_dep', event.workbench)\n return build_task_from_config(config, build_dep, True)\n\n else:\n raise RuntimeError('No config for building')", "def show_build_order(c, ignore=False, update=False):\n\n print(\" # Add this to invoke.yaml\")\n print(\" build_order:\")\n for p in _build_order(c, ignore=ignore, update=update):\n print(f\" - {p}\")\n\n print(\"\")", "def main():\n logging.basicConfig(\n level=logging.DEBUG, format=\"%(levelname)s: %(message)s\")\n root.add_command(all_)\n root.add_command(build)\n root.add_command(cleanup)\n root.add_command(clitest)\n root.add_command(run)\n root.add_command(push)\n root.add_command(unittest)\n root.add_command(list_images)\n root.add_command(list_stages)\n root()", "def less_nested_example_rst():\n\n return", "def build_step(self, signals):\n raise BuildError(\"OpBuilders must implement a `build_step` function\")", "def build(root):", "def tree(ctx):\n hokusai.print_command_tree(ctx.find_root().command)", "def test_get_scenarios_expanded(self):\n pass", "def test_python(width=10):\n\n stage_1 = [diamond(sleep=60, inputs=[0])]\n\n stage_2 = []\n for i in range(0, width):\n stage_2.extend([diamond(sleep=20, inputs=stage_1)])\n\n stage_3 = [diamond(sleep=30, inputs=stage_2)]\n\n if not stage_3[0].done():\n time.sleep(30)\n for sitename in dfk.executors:\n print(dfk.executors[sitename].status())", "def ui_root1():\n return send_build()", "def run_steps(properties, stream_engine, step_runner, universe_view,\n engine_flags=None, emit_initial_properties=False):\n with stream_engine.make_step_stream('setup_build') as s:\n if emit_initial_properties:\n for key in sorted(properties.iterkeys()):\n s.set_build_property(key, json.dumps(properties[key], sort_keys=True))\n\n engine = RecipeEngine(\n step_runner, properties, os.environ, universe_view, engine_flags)\n\n # Create all API modules and top level RunSteps function. 
It doesn't launch\n # any recipe code yet; RunSteps needs to be called.\n api = None\n\n assert 'recipe' in properties\n recipe = properties['recipe']\n\n root_package = universe_view.universe.package_deps.root_package\n run_recipe_help_lines = [\n 'To repro this locally, run the following line from the root of a %r'\n ' checkout:' % (root_package.name),\n '',\n '%s run --properties-file - %s <<EOF' % (\n os.path.join( '.', root_package.relative_recipes_dir, 'recipes.py'),\n recipe),\n '%s' % json.dumps(properties),\n 'EOF',\n '',\n 'To run on Windows, you can put the JSON in a file and redirect the',\n 'contents of the file into run_recipe.py, with the < operator.',\n ]\n\n with s.new_log_stream('run_recipe') as l:\n for line in run_recipe_help_lines:\n l.write_line(line)\n\n # Find and load the recipe to run.\n try:\n recipe_script = universe_view.load_recipe(recipe, engine=engine)\n s.write_line('Running recipe with %s' % (properties,))\n\n api = loader.create_recipe_api(\n universe_view.universe.package_deps.root_package,\n recipe_script.LOADED_DEPS,\n recipe_script.path,\n engine,\n recipe_test_api.DisabledTestData())\n\n s.add_step_text('running recipe: \"%s\"' % recipe)\n except (loader.LoaderError, ImportError, AssertionError) as e:\n for line in str(e).splitlines():\n s.add_step_text(line)\n s.set_step_status('EXCEPTION')\n if engine_flags and engine_flags.use_result_proto:\n return result_pb2.Result(\n failure=result_pb2.Failure(\n human_reason=str(e),\n exception=result_pb2.Exception(\n traceback=traceback.format_exc().splitlines()\n )))\n return RecipeResult({\n 'status_code': 2,\n 'reason': str(e),\n })\n\n # The engine will use step_runner to run the steps, and the step_runner in\n # turn uses stream_engine internally to build steam steps IO.\n return engine.run(recipe_script, api)", "def build(ctx):\n ctx.run(\"vsce package\", replace_env=False)", "def test_sections_json_spider_three_levels_with_summary_and_call(self):\n title = (\"Taking Action for the Social and Emotional Health of \"\n\t \"Young Children: A Report to the Community from the Denver \"\n\t\t \"Early Childhood Council\")\n\tsummary = (\"Now, Denver has a plan of action to make it easier for \"\n\t \"families to access early childhood mental health \"\n\t\t \"information, intervention and services.\")\n\tcall_to_action = (\"Test call to action.\")\n\tbyline = \"Denver Early Childhood Council\"\n story = create_story(title=title, summary=summary, byline=byline,\n\t\t\t call_to_action=call_to_action)\n section1 = create_section(\"We're ready to take action. 
Are you?\",\n\t\t\t story=story, weight=7)\n\tsection2 = create_section(\"Ricardo's Story\",\n\t\t\t story=story, weight=2)\n\tsection3 = create_section(\"Meeting the need for better child mental health services\",\n\t\t\t story=story, root=True, weight=1)\n\tsection4 = create_section(\"Healthy Minds Support Strong Futures\",\n\t\t\t story=story, weight=5) \n\tsection5 = create_section(\"Community Voices\",\n\t\t\t story=story, weight=3)\n\tsection6 = create_section(\"Our Vision: That All Children in Denver are Valued, Healthy and Thriving\",\n\t\t\t story=story, weight=4)\n\tsection7 = create_section(\"Defining a \\\"Framework for Change\\\" with Actionable Goals and Strategies\",\n\t\t\t story=story, weight=5) \n section8 = create_section(\"How Can the Plan Make a Difference?\",\n\t\t\t story=story, weight=5)\n\tsection9 = create_section(\"Impact\", story=story, weight=6)\n SectionRelation.objects.create(parent=section6, child=section8,\n weight=0)\n SectionRelation.objects.create(parent=section7, child=section9,\n weight=0)\n SectionRelation.objects.create(parent=section6, child=section7,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section1,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section6,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section4,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section5,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section2,\n weight=0)\n\tjson_sections = simplejson.loads(story.structure.sections_json(\n\t\tinclude_summary=True, include_call_to_action=True))\n\tself.assertIn(\n\t section8.section_id,\n\t self._get_section(json_sections, section6.section_id)['children'])\n\tself.assertIn(\n\t section9.section_id,\n\t self._get_section(json_sections, section7.section_id)['children'])\n\tself.assertIn(\n\t section7.section_id,\n\t self._get_section(json_sections, section6.section_id)['children'])\n\tself.assertIn(\n\t section1.section_id,\n\t self._get_section(json_sections, section3.section_id)['children'])\n\tself.assertIn(\n\t section6.section_id,\n\t self._get_section(json_sections, section3.section_id)['children'])\n\tself.assertIn(\n\t section4.section_id,\n\t self._get_section(json_sections, section3.section_id)['children'])\n\tself.assertIn(\n\t section5.section_id,\n\t self._get_section(json_sections, section3.section_id)['children'])\n\tself.assertIn(\n\t section2.section_id,\n\t self._get_section(json_sections, section3.section_id)['children'])\n\tself.assertEqual(json_sections[0]['section_id'], 'summary')\n\tself.assertEqual(json_sections[0]['next_section_id'], \n\t\t\t json_sections[1]['section_id'])\n\tself.assertEqual(json_sections[1]['previous_section_id'], 'summary')\n\tself.assertEqual(json_sections[-1]['section_id'], 'call-to-action')\n\tself.assertEqual(json_sections[-1]['previous_section_id'], \n\t\t\t json_sections[-2]['section_id'])\n\tself.assertEqual(json_sections[-2]['next_section_id'], 'call-to-action')", "def start(context, project_name):\n\n gcc_version = '10-2020-q4-major-'\n os_extension = ''\n\n if platform.system() == 'Linux':\n if platform.machine() == 'x86_64':\n os_extension = 'x86_64-linux'\n else:\n os_extension = 'aarch64-linux'\n elif platform.system() == 'Darwin':\n os_extension = 'mac'\n elif platform.system() == 'Windows':\n os_extension = 'win32'\n\n final_branch_name = f'{gcc_version}{os_extension}'\n\n if not os_extension:\n click.secho(f'This system {platform.system()}:{platform.machine()} ' +\n 
'is not supported for SJSU-Dev2 ', fg='red', bold=True)\n return -1\n\n click.secho(f'Creating project: {project_name}', fg='white', bold=True)\n Path(project_name).mkdir()\n\n click.echo(f' Creating \"{project_name}/.sj2\" directory')\n Path(f'{project_name}/.sj2').mkdir(exist_ok=True)\n Path(f'{project_name}/.sj2/reserved').touch(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/library\" directory')\n Path(f'{project_name}/library').mkdir(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/packages\" directory')\n Path(f'{project_name}/packages').mkdir(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/main.cpp\" source file')\n Path(f'{project_name}/main.cpp').write_text(BASIC_MAIN_CPP)\n\n click.echo('')\n\n context.invoke(install, library='libcore', tag='main',\n project_directory=project_name)\n context.invoke(install, library='libarmcortex',\n tag='main', project_directory=project_name)\n context.invoke(install, library='liblpc40xx', tag='main',\n project_directory=project_name)\n context.invoke(install, library='libstm32f10x',\n tag='main', project_directory=project_name)\n context.invoke(install, library='gcc-arm-none-eabi-picolibc',\n tag=final_branch_name, project_directory=project_name)", "def explore(self, board, args):\n self.tree.explore(board, *args)", "def run_steps(stream, build_properties, factory_properties,\n test_data=recipe_test_api.DisabledTestData()):\n stream.honor_zero_return_code()\n\n # TODO(iannucci): Stop this when blamelist becomes sane data.\n if ('blamelist_real' in build_properties and\n 'blamelist' in build_properties):\n build_properties['blamelist'] = build_properties['blamelist_real']\n del build_properties['blamelist_real']\n\n properties = factory_properties.copy()\n properties.update(build_properties)\n\n # TODO(iannucci): A much better way to do this would be to dynamically\n # detect if the mirrors are actually available during the execution of the\n # recipe.\n if ('use_mirror' not in properties and (\n 'TESTING_MASTERNAME' in os.environ or\n 'TESTING_SLAVENAME' in os.environ)):\n properties['use_mirror'] = False\n\n # It's an integration point with a new recipe engine that can run steps\n # in parallel (that is not implemented yet). 
Use new engine only if explicitly\n # asked by setting 'engine' property to 'ParallelRecipeEngine'.\n engine = RecipeEngine.create(stream, properties, test_data)\n\n # Create all API modules and an instance of top level GenSteps generator.\n # It doesn't launch any recipe code yet (generator needs to be iterated upon\n # to start executing code).\n api = None\n with stream.step('setup_build') as s:\n assert 'recipe' in factory_properties\n recipe = factory_properties['recipe']\n\n properties_to_print = properties.copy()\n if 'use_mirror' in properties:\n del properties_to_print['use_mirror']\n\n run_recipe_help_lines = [\n 'To repro this locally, run the following line from a build checkout:',\n '',\n './scripts/tools/run_recipe.py %s --properties-file - <<EOF' % recipe,\n repr(properties_to_print),\n 'EOF',\n '',\n 'To run on Windows, you can put the JSON in a file and redirect the',\n 'contents of the file into run_recipe.py, with the < operator.',\n ]\n\n for line in run_recipe_help_lines:\n s.step_log_line('run_recipe', line)\n s.step_log_end('run_recipe')\n\n try:\n recipe_module = recipe_loader.load_recipe(recipe)\n stream.emit('Running recipe with %s' % (properties,))\n api = recipe_loader.create_recipe_api(recipe_module.DEPS,\n engine,\n test_data)\n steps = recipe_module.GenSteps\n s.step_text('<br/>running recipe: \"%s\"' % recipe)\n except recipe_loader.NoSuchRecipe as e:\n s.step_text('<br/>recipe not found: %s' % e)\n s.step_failure()\n return RecipeExecutionResult(2, None)\n\n # Run the steps emitted by a recipe via the engine, emitting annotations\n # into |stream| along the way.\n return engine.run(steps, api)", "def stage(self, stage: osbuild.Stage):", "def test_quick_build1(self):\n pass", "def expand_tasks_with_samples( # pylint: disable=R0913,R0914\n self,\n dag,\n chain_,\n samples,\n labels,\n task_type,\n adapter_config,\n level_max_dirs,\n):\n LOG.debug(f\"expand_tasks_with_samples called with chain,{chain_}\\n\")\n # Figure out how many directories there are, make a glob string\n directory_sizes = uniform_directories(len(samples), bundle_size=1, level_max_dirs=level_max_dirs)\n\n glob_path = \"*/\" * len(directory_sizes)\n\n LOG.debug(\"creating sample_index\")\n # Write a hierarchy to get the all paths string\n sample_index = create_hierarchy(\n len(samples),\n bundle_size=1,\n directory_sizes=directory_sizes,\n root=\"\",\n n_digits=len(str(level_max_dirs)),\n )\n\n LOG.debug(\"creating sample_paths\")\n sample_paths = sample_index.make_directory_string()\n\n LOG.debug(\"assembling steps\")\n # the steps in the chain\n steps = [dag.step(name) for name in chain_]\n\n # sub in globs prior to expansion\n # sub the glob command\n steps = [\n step.clone_changing_workspace_and_cmd(cmd_replacement_pairs=parameter_substitutions_for_cmd(glob_path, sample_paths))\n for step in steps\n ]\n\n # workspaces = [step.get_workspace() for step in steps]\n # LOG.debug(f\"workspaces : {workspaces}\")\n\n needs_expansion = is_chain_expandable(steps, labels)\n\n LOG.debug(f\"needs_expansion {needs_expansion}\")\n\n if needs_expansion:\n # prepare_chain_workspace(sample_index, steps)\n sample_index.name = \"\"\n LOG.debug(\"queuing merlin expansion tasks\")\n found_tasks = False\n conditions = [\n lambda c: c.is_great_grandparent_of_leaf,\n lambda c: c.is_grandparent_of_leaf,\n lambda c: c.is_parent_of_leaf,\n lambda c: c.is_leaf,\n ]\n for condition in conditions:\n if not found_tasks:\n for next_index_path, next_index in sample_index.traverse(conditional=condition):\n LOG.info(\n 
f\"generating next step for range {next_index.min}:{next_index.max} {next_index.max-next_index.min}\"\n )\n next_index.name = next_index_path\n\n sig = add_merlin_expanded_chain_to_chord.s(\n task_type,\n steps,\n samples[next_index.min : next_index.max],\n labels,\n next_index,\n adapter_config,\n next_index.min,\n )\n sig.set(queue=steps[0].get_task_queue())\n\n if self.request.is_eager:\n sig.delay()\n else:\n LOG.info(f\"queuing expansion task {next_index.min}:{next_index.max}\")\n self.add_to_chord(sig, lazy=False)\n LOG.info(f\"merlin expansion task {next_index.min}:{next_index.max} queued\")\n found_tasks = True\n else:\n LOG.debug(\"queuing simple chain task\")\n add_simple_chain_to_chord(self, task_type, steps, adapter_config)\n LOG.debug(\"simple chain task queued\")" ]
[ "0.5666077", "0.5666077", "0.5635756", "0.5634427", "0.53961426", "0.5334729", "0.52571476", "0.52398247", "0.5215903", "0.5194408", "0.5111144", "0.51089895", "0.505281", "0.50474066", "0.5011156", "0.50028014", "0.49496236", "0.4938487", "0.4927877", "0.48954463", "0.48513916", "0.4836588", "0.4828846", "0.48177338", "0.48066425", "0.48060808", "0.47948453", "0.47854248", "0.47723517", "0.47621235" ]
0.5708708
0
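Because the generator above yields after opening a named placeholder step and widening the context, it reads as a context manager (presumably wrapped with `contextlib.contextmanager`); a usage sketch under that assumption, with `api.step.nest` as the assumed attribute path:

    def RunSteps(api):
        # Steps run inside the block are grouped under one collapsible 'setup'
        # entry on the build page; nested blocks may nest further.
        with api.step.nest('setup'):
            api.step('fetch', ['git', 'fetch', 'origin'])
            api.step('sync', ['gclient', 'sync'])
        api.step('build', ['ninja', '-C', 'out/Release'])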
Insert single row into a table
def _insert_table_row(self, db: str, table: str, row: Dict[str, Any]): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_row(self, table: str, row_data: dict):\r\n\r\n columns = \"\".join([f\"'{i}',\" for i in row_data]).rstrip(\",\")\r\n keys = \"\".join([f\"'{row_data[i]}',\" for i in row_data]).rstrip(\",\")\r\n sql_statement = f\"INSERT INTO {table} ({columns}) VALUES({keys});\"\r\n try:\r\n self.__cursor(sql_statement)\r\n self.__db_conn.commit()\r\n except sqlite3.Error as error:\r\n print(\"[!] Couldn't add record\")\r\n print(\"[!]\", str(error).capitalize())\r\n return\r\n print(\"[*] Record added successfully.\")", "def insert_row(self, tablename, fields):\n insert_params = \"(\" + \",\".join(['?' for x in fields]) + \")\"\n self.cursor.execute(\"insert into \" + tablename + \" values \" +\n insert_params, fields)", "def insert(db, table, name, row):\n\n # Build insert prepared statement\n columns = [name for name, _ in table.items()]\n insert = INSERT_ROW.format(table=name, columns=\", \".join(columns), values=(\"?, \" * len(columns))[:-2])\n\n try:\n db.execute(insert, values(table, row, columns))\n except Exception as ex:\n print(\"Error inserting row: {}\".format(row), ex)", "def insert(self, row):\n if not self.loaded:\n print(\"Database is not loaded\")\n return False\n\n self.rows.append(row)\n return True", "def add_row(self, row_id):", "def insertRow(self, row, data):\n newRowData = self.createRowData(data)\n self.jobRow.insertRow(row, newRowData)", "def singleInsert(self, table_name, fields, field_values, field_types=[]):\n if not self.checkTable(table_name):\n self.createTable(table_name, fields, field_types)\n self.transactionInsert(table_name, fields, field_values)\n self.transactionEnd()", "def insertRow(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def insert(self, table, *args, **kwargs):\n\n values = None\n query = \"INSERT INTO %s \" % table\n if kwargs:\n keys = kwargs.keys()\n values = kwargs.values()\n query += \"(\" + \",\".join([\"`%s`\"]*len(keys)) % tuple(keys) + \\\n \") VALUES(\" + \",\".join([\"%s\"]*len(values)) + \")\"\n elif args:\n values = args\n query += \" VALUES(\" + \",\".join([\"%s\"]*len(values)) + \")\"\n\n self.__open()\n self.__cursor.execute(query, values)\n self.__connection.commit()\n self.__close()\n return self.__cursor.lastrowid", "def insert(self, table_name, rows, bulk=True):\n table = self._create_table(table_name)\n return self._perform_query(table.insert(), rows, bulk)", "def insert_data(self, row, table_fields_names, table_fields_types):\n\n\t\tquery = ''\n\n\t\ttry:\t\t\t\t\n\t\t\tquery = self.form_insert_query(TABLE_NAME, row, table_fields_names, table_fields_types)\n\t\t\t# print query\n\t\t\tself.execute_query(query)\t\t\t\n\t\texcept Exception, e:\t\t\t\t\n\t\t\tprint '[e] Exeption: %s' % (str(e))\n\t\t\tprint '\\t[q] Query that caused exception \\n %s' % (query)\n\t\t\treturn False\n\n\t\treturn True", "def insert_row(self, identifier, position, datastore):\n # Get dataset. 
Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Insert new row into dataset.\n df = vizual.insert_row(df=dataset.to_dataframe(), pos=position)\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations\n )\n return VizualApiResult(ds)", "def insert_row(self, row_dict):\n sql = self.commands.insert_row(\n self.name,\n self._join_cols(row_dict.keys()),\n self._join_values(row_dict.values())\n )\n return self.execute(sql)[0][0]", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def insert_row(self, table_model, row, count):\n self.undostack.push(InsertRowCommand(table_model, row, count))", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def insert_row(table_str, attribute_value_dict): #works\n sql = make_insert_row(table_str, attribute_value_dict)\n #print sql\n execute_edit_queries(sql)", "def insert(self, row: BaseTrackerRow) -> None:\n assert isinstance(row, self._tracker_row)\n row = [row.__dict__[\"_\" + col] for col in list(self._table.columns)]\n self._table.loc[len(self._table)] = row", "def insert(self, data, table, **kwargs):\n logging.info(f'Inserting into `{table}`')\n\n try:\n data.to_sql(table, self.engine, **kwargs)\n try:\n self.execute(f'ALTER TABLE `{table}` ADD PRIMARY KEY (`id`);')\n except:\n pass\n return True\n except:\n logging.exception('Something went wrong inserting. 
Check trace.')\n return False", "def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()", "def single_insert(conn, insert_req):\n cursor = conn.cursor()\n try:\n cursor.execute(insert_req)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n cursor.close()", "def insert(self, table, element):\n\n update = self.update(table, element)\n if update:\n return update\n\n fields = []\n values = []\n for key in element.keys():\n fields.append(key)\n values.append(element[key])\n result = self.__insert(table, fields, values)\n return result", "def insert(self, table, **kwargs):\n if 'creator' in self.get_columns(table):\n kwargs.setdefault('creator', self.user_id)\n if 'uuid' in self.get_columns(table):\n generated_uuid = (table == 'concept' and\n (str(kwargs['concept_id']) + 'A'*36)[:36] or uuid.uuid4())\n kwargs.setdefault('uuid', generated_uuid)\n\n columns = kwargs.keys()\n values = kwargs.values()\n placeholders = ['%s'] * len(values)\n if 'date_created' in self.get_columns(table):\n columns += ['date_created']\n placeholders += ['now()']\n\n self.db.execute(\n 'insert into %s (%s) values (%s)' %\n (table, ', '.join(columns), ', '.join(placeholders)), *values)\n if table + '_id' in self.get_columns(table):\n return self.db.get(table + '_id', uuid=kwargs['uuid'])", "def Insert(self):\n sql = 'INSERT INTO %s ( %s ) VALUES ( %s )' % (\n self.table_name,\n ', '.join(self.values),\n ', '.join(['?' for _ in self.values])\n )\n return Database().Execute(sql, tuple(self.values.values()))", "def insert_one(self, data):\n _client = self.client\n _db = _client[self.database]\n _col = _db[self.collection]\n\n x = _col.insert_one(data)\n\n return x", "def insertRow(self, index, *row):\n if ((len(row) == 1) and (type(row[0]) in MATRIX_VALID_COLLECTIONS)):\n row = row[0]\n if self._width:\n if not (len(row) == self._width):\n raise ValueError('Improper length for new row: %d, should be %d' % (len(row), self._width))\n else:\n self._width = len(row)\n self._height += 1\n # make a deep copy\n newrow = list()\n for item in row:\n if not (type(item) in MATRIX_VALID_TYPES):\n message = \"Values must be of type \"\n for t in range(len(MATRIX_VALID_TYPENAMES)):\n if t:\n message += ' or '\n message += \"'%s'\" % MATRIX_VALID_TYPENAMES[t]\n raise TypeError(message)\n newrow.append(item)\n self._value.insert(index, newrow)", "def add_row(self, row):\n ...", "async def insert_one(self, model):\n\n pass", "def rpc_database_insert_row(self, keys, values):\n\t\tif not isinstance(keys, (list, tuple)):\n\t\t\tkeys = (keys,)\n\t\tif not isinstance(values, (list, tuple)):\n\t\t\tvalues = (values,)\n\t\tassert len(keys) == len(values)\n\t\ttable_name = self.path.split('/')[-2]\n\t\tfor key, value in zip(keys, values):\n\t\t\tassert key in DATABASE_TABLES[table_name]\n\t\ttable = DATABASE_TABLE_OBJECTS.get(table_name)\n\t\tassert table\n\t\tsession = db_manager.Session()\n\t\trow = table()\n\t\tfor key, value in zip(keys, values):\n\t\t\tsetattr(row, key, value)\n\t\tsession.add(row)\n\t\tsession.close()\n\t\treturn" ]
[ "0.7340923", "0.7256007", "0.7239077", "0.7177126", "0.71218693", "0.70833904", "0.7065664", "0.69191945", "0.68331724", "0.682139", "0.68028295", "0.67609316", "0.6746725", "0.6736111", "0.673245", "0.6703915", "0.6692947", "0.6680959", "0.66731584", "0.6654706", "0.6627586", "0.66269606", "0.6552719", "0.65406734", "0.64872986", "0.6486009", "0.6460466", "0.6456129", "0.64366454", "0.64338666" ]
0.7353583
0
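The document above is a stub (`pass`). One possible way to realize it, sketched against sqlite3 as a stand-in backend (the function name and connection argument are illustrative, not taken from the original class):

    import sqlite3
    from typing import Any, Dict

    def insert_table_row(conn: sqlite3.Connection, table: str, row: Dict[str, Any]) -> None:
        """Insert a single row, binding values as query parameters."""
        # Table and column names cannot be bound as parameters, so they are
        # interpolated here; callers must pass trusted identifiers only.
        columns = ', '.join(row.keys())
        placeholders = ', '.join('?' for _ in row)
        sql = f'INSERT INTO {table} ({columns}) VALUES ({placeholders})'
        conn.execute(sql, tuple(row.values()))
        conn.commit()

    # Example: insert_table_row(conn, 'users', {'name': 'ada', 'age': 36})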
Compares two response objects based on their NVCness. Only returns true if both responses are in agreement with either responding NVC or not NVC.
def compare(obj_a, obj_b): return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_vn_in_api_server(self):\n self.api_verification_flag = True\n self.api_s_vn_obj = self.api_s_inspect.get_cs_vn(\n domain=self.domain_name, project=self.project_name,\n vn=self.vn_name, refresh=True)\n if not self.api_s_vn_obj:\n self.logger.debug(\"VN %s is not found in API-Server\" %\n (self.vn_name))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n if self.api_s_vn_obj['virtual-network']['uuid'] != self.uuid:\n self.logger.warn(\n \"VN Object ID %s in API-Server is not what was created\" % (self.uuid))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n\n subnets = list()\n for ipam in self.api_s_vn_obj['virtual-network']['network_ipam_refs']:\n subnets.extend(ipam['attr']['ipam_subnets'])\n for vn_subnet in self.vn_subnets:\n subnet_found = False\n vn_subnet_cidr = str(IPNetwork(vn_subnet['cidr']).ip)\n for subnet in subnets:\n if subnet['subnet']['ip_prefix'] == vn_subnet_cidr:\n subnet_found = True\n if not subnet_found:\n self.logger.warn(\n \"VN Subnet IP %s not found in API-Server for VN %s\" %\n (vn_subnet_cidr, self.vn_name))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n # end for\n self.api_s_route_targets = self.api_s_inspect.get_cs_route_targets(\n vn_id=self.uuid)\n if not self.api_s_route_targets:\n errmsg = \"Route targets not yet found in API-Server for VN %s\" % self.vn_name\n self.logger.error(errmsg)\n self.api_verification_flag = self.api_verification_flag and False\n return False\n self.rt_names = self.api_s_inspect.get_cs_rt_names(\n self.api_s_route_targets)\n\n if not self.rt_names:\n self.logger.debug(\n 'RT names not yet present for VN %s', self.vn_name)\n return False\n\n if self.rt_number:\n if not any(item.endswith(self.rt_number) for item in self.rt_names):\n self.logger.debug('RT %s is not found in API Server RT list %s ' %(\n self.rt_number, self.rt_names))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n\n self.api_s_routing_instance = self.api_s_inspect.get_cs_routing_instances(\n vn_id=self.uuid)\n if not self.api_s_routing_instance:\n msg = \"Routing Instances not found in API-Server for VN %s\" % self.vn_name\n self.logger.warn(msg)\n self.api_verification_flag = self.api_verification_flag and False\n return False\n self.ri_ref = self.api_s_routing_instance['routing_instances'][0]['routing-instance']\n if not self.verify_network_id():\n return False\n self.api_verification_flag = self.api_verification_flag and True\n self.logger.info(\"Verifications in API Server for VN %s passed\" %\n (self.vn_name))\n return True", "def __eq__(self, other):\n if not isinstance(other, VirtualMachinesResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def consistent(self, c, combination):\t\t\n\t\treturn (self.response(c, combination) \n\t\t\t== self.response(combination, self.code))", "def compare_cpes(lhs: ImageCpe, rhs: ImageCpe):\n vendor_cmp = compare_fields(lhs.vendor, rhs.vendor)\n if vendor_cmp != 0:\n return vendor_cmp\n\n name_cmp = compare_fields(lhs.name, rhs.name)\n if name_cmp != 0:\n return name_cmp\n\n version_cmp = compare_fields(lhs.version, rhs.version)\n if version_cmp != 0:\n return version_cmp\n\n update_cmp = compare_fields(lhs.update, rhs.update)\n if update_cmp != 0:\n return update_cmp\n\n meta_cmp = compare_fields(lhs.meta, rhs.meta)\n if meta_cmp != 0:\n return meta_cmp\n\n # all avenues of comparison have been depleted, the two cpes are same for all practical 
purposes\n return 0", "def test_equality(self):\n self.assertEqual(self._version1, self._version1)\n self.assertNotEqual(self._version2, self._version1)\n self.assertEqual(self._version1, PrcsVersion(self._version1))", "def __eq__(self, other):\n if not isinstance(other, InlineResponse20020Result):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ComputingResourceFlavorsRsp):\n return False\n\n return self.__dict__ == other.__dict__", "def _cryptovariables_equal(x, y):\n\n return (\n _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) ==\n _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y))", "def __eq__(self, other):\n if not isinstance(other, ClientDetailResponseResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def is_response_correct(self, response):\n for answer in self.my_osid_object.get_answers():\n if self._is_match(response, answer):\n return True\n return False", "def __eq__(self, other):\n if not isinstance(other, InlineResponse20023):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, ChannelReturnResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, IQueryUserPartnerCouponsResultV2):\n return False\n\n return self.__dict__ == other.__dict__", "def is_correctness_available_for_response(self, response):\n return True", "def __eq__(self, other):\n if not isinstance(other, ChannelReturnResponse):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, BalanceResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def is_equivalence(self) -> bool:", "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, DetectionResult):\n return False\n\n return self.to_pb2().__eq__(other.to_pb2())", "def __eq__(self, other):\n if not isinstance(other, InlineResponse2018):\n return False\n\n return self.__dict__ == other.__dict__", "def vote_result(self) -> bool:\n token_score = self.create_interface_score(self._token_score.get(), TokenInterface)\n yes = 0\n no = 0\n for address in self._voted:\n vote = self._vote[str(address)]\n if vote == 'yes':\n yes += token_score.balanceOf(address)\n else:\n no += token_score.balanceOf(address)\n self._yes_votes.set(yes)\n self._no_votes.set(no)\n if self._yes_votes.get() > (token_score.totalSupply() - token_score.balanceOf(self._rewards_score.get())) // 2:\n return True\n else:\n return False", "def __ne__(self, other):\n if not isinstance(other, InlineResponse200):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, GetSesameResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def compareVerOnly(v1, v2):\n return compareEVR(('', v1, ''), ('', v2, ''))", "def __eq__(self, other):\n if not isinstance(other, InlineResponse200):\n return False\n\n return self.to_dict() == other.to_dict()", "def compare_results(self, result1, result2):\n return self.compare_measurements(measurement1=result1, measurement2=result2)", "def __eq__(self, other):\n if not isinstance(other, InlineResponse2001):\n return False\n\n return self.__dict__ == other.__dict__", "def verif_response(response):\n if response.status_code >= 200 and response.status_code <= 299:\n logging.debug(\"response server OK::{}\".format(response.text))\n return True\n\n logging.error(\"response server 
KO::{}\".format(response.text))\n return False", "def __eq__(self, other):\n if not isinstance(other, DepositCompleteResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def compare_response_to_model_instance(self, response, model_instance):\n parsed_response = json_decode(response)\n headers = parsed_response['headers']\n data = parsed_response['data']\n self.assertEquals(len(data), len(model_instance))\n for i in range(len(data)):\n datum = self.deserialize(headers, data[i])\n self.compare_model_instance(datum, model_instance[i])", "def test_equal_method(self):\n sc1 = ServComs(self.serverIp, \"1\")\n sc2 = ServComs(self.serverIp, \"1\")\n sc3 = ServComs(self.serverIp, \"2\")\n\n self.assertEqual(sc1, sc2) # Same ip and id\n self.assertNotEqual(sc1, sc3) # different ip" ]
[ "0.57760257", "0.57723004", "0.57442796", "0.5704212", "0.5581943", "0.55500567", "0.5549865", "0.55349195", "0.55025715", "0.54922974", "0.544647", "0.5440523", "0.54150635", "0.5412424", "0.5370746", "0.5357937", "0.535282", "0.5340017", "0.53387195", "0.5306857", "0.5294818", "0.5288538", "0.52878577", "0.52720547", "0.5267108", "0.5264616", "0.5248908", "0.52382374", "0.5237619", "0.5237269" ]
0.6934954
0
Runs main gobject loop.
def run_main_loop(): mainloop = GObject.MainLoop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loop( self ):\n import gtk\n while self.count >= 1:\n log.debug( 'GTK loop restarting' )\n while gtk.events_pending():\n gtk.main_iteration()\n log.debug( 'GTK loop exiting' )\n try:\n del self.t_loop\n except AttributeError, err:\n pass", "def run(self):\n GLib.MainLoop().run()", "def startGTK( ):\n if not INITIALIZED:\n init()\n if LOOP_TRACKER:\n LOOP_TRACKER.increment()", "def start(self):\n if self.__started:\n return\n\n self.__started = True\n GLib.timeout_add(GtkMainLoop.DEADLINE_GLIB, self.__ioloop_run)\n self.__gi_loop.run()", "def main(self):\n\t\tgtk.main()", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def main():\n global loop\n DBusGMainLoop(set_as_default=True)\n\n loop = gobject.MainLoop()\n bus = dbus.SessionBus()\n\n bus.add_signal_receiver(catchall_handler, \n dbus_interface=\"org.freedesktop.DBus.Properties\")\n\n threading.Thread(target=run_spotify).start()\n loop.run()", "def run(self):\n self.cmdloop()", "def main_loop(self):\n dt = 0\n self.clock.tick(FPS)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(FPS) / 1000.0", "def run(self):\n\t\tgtk.gdk.threads_init()\t\t\t# (!) important for multi-threading to work with GTK+\n\t\tself.__update_timer = gobject.timeout_add(250, self.__update, self)\n\t\tself.statusbar1.push(0, \"Ready (for about dialog; right-click to lower right corner).\")\n\t\tgtk.main()", "def _run(self):\n while(self._loop):\n pass", "async def _main(self):\n while True:\n time.sleep(1)", "def run(self):\n self.ui['main_window'].widgets['main'].show_all()\n gtk.main()", "def loop(self):\n pass", "def loop(self):\r\n while self.__running:\r\n self.__check_events()\r\n self.__render()\r\n self.__reset_variables()", "def main():\n BouncyGUI().mainloop()", "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()", "def main():\n dealCards().mainloop()", "def main_loop(self):\n dt = 0.3\n self.clock.tick(self.fps)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(self.fps)/1000.0 # create delta time variable to multiply with movement and rotation\n self.display_fps()\n self.health_bar()\n self.enemy_health()\n self.energy_bar()", "def run():\n gui = GUI()\n gui.mainloop()", "def gameloop(self):\r\n\r\n # What you see above (\"\"\" some text \"\"\") is called a docstring.\r\n # It explains the purpose of the method/function.\r\n # There should generally be one for every function.\r\n\r\n\r\n # Below is the main loop\r\n while True: \r\n # One cycle in the loop is equivalent to one frame.\r\n\r\n self.event()\r\n\r\n self.draw_objects()\r\n self.move_objects()\r\n\r\n self.update_display()", "def run(self):\n\n observer = Observer()\n observer.schedule(self.ehandler, \"./gl\", True)\n observer.start()\n observer.join()", "def run(self):\n self.window.show()\n Gtk.main()", "def run(self):\n if self._main_loop:\n return\n self._main_loop = GObject.MainLoop()\n self._disconnect_all()\n self._register()\n logger.info(\"--- Mainloop started ---\")\n logger.info(\"Hub is ready for onboarding\")\n try:\n self._main_loop.run()\n except KeyboardInterrupt:\n # ignore exception as it is a valid way to exit the program\n # and skip to finally clause\n pass\n except Exception as e:\n logger.error(e)\n finally:\n logger.info(\"--- Mainloop finished ---\")\n self._unregister()\n self._main_loop.quit()\n self._main_loop = None", "def run(self):\n self.window.mainloop()", "def main_loop(self) -> None:\n # 
Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self._impl.main_loop()", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)", "def game_loop(self):\n self.interface.game_loop(self)" ]
[ "0.760594", "0.7460419", "0.72110164", "0.7099442", "0.7034407", "0.691706", "0.6902692", "0.68630695", "0.6785323", "0.67737657", "0.6773619", "0.6746319", "0.6662695", "0.66581476", "0.66040593", "0.6601622", "0.65761584", "0.6574493", "0.65733767", "0.6544273", "0.65289533", "0.65134174", "0.65104914", "0.6422718", "0.64116424", "0.641118", "0.64009243", "0.64009243", "0.63575095", "0.6356688" ]
0.8040331
0
Initialize dbus system bus acquire adapter/interface for org.bluez.GattManager1 register application for 'org.bluez.GattService1'
def __init__(self): dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) self.bus = dbus.SystemBus() self.adapter = self._find_adapter() if not self.adapter: IFaceNotFoundException('%s interface not found' % GATT_MANAGER_IFACE) self.service_manager = dbus.Interface( self.bus.get_object(BLUEZ_SERVICE_NAME, self.adapter), GATT_MANAGER_IFACE) self.mainloop = GObject.MainLoop() self.ctx = GattContext(self.bus, self.mainloop) self.app = Application(self.ctx) #print('Registering GATT application...') self.service_manager.RegisterApplication(self.app.get_path(), {}, reply_handler=register_app_cb, error_handler=register_app_error_cb)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, alias, adapter=None):\n\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n\n self.bus = dbus.SystemBus()\n\n if not adapter:\n adapter = self._find_adapter()\n if not adapter:\n logger.error(\"Could not find any adapter implementing GattManager1 + LEAdvertisingManager1 interfaces\")\n raise BleNotSupportedException(\n \"No adapter implementing GattManager1 + LEAdvertisingManager1 found\")\n self._adapter_path = '/org/bluez/' + adapter\n self._device_properties_changed_signal = None\n self._adapter_properties_changed_signal = None\n self._main_loop = None\n self.on_remote_disconnected = None\n\n self._adapter_props = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path), DBUS_PROP_IFACE)\n\n self._disable_br_edr()\n\n logger.info(\"Creating BLE Peripheral with alias: %s\" % alias)\n\n self.alias = alias\n self.is_powered = True\n self.discoverable_timeout = 0\n self.is_advertising = False\n\n # Prepare Managers:\n\n self._ad_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path),\n LE_ADVERTISING_MANAGER_IFACE)\n\n self._gatt_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path),\n GATT_MANAGER_IFACE)\n\n # Create Advertisement and GATT Application:\n\n self._advertisement = Advertisement(self.bus, 0, 'peripheral')\n self._app = Application(self.bus)", "def start(self):\n \r\n # Fetch the XBee Manager name from the Settings Manager:\r\n xbee_manager_name = SettingsBase.get_setting(self, \"xbee_device_manager\")\r\n dm = self.__core.get_service(\"device_driver_manager\")\r\n self.__xbee_manager = dm.instance_get(xbee_manager_name)\r\n\r\n # Register ourselves with the XBee Device Manager instance:\r\n self.__xbee_manager.xbee_device_register(self)\r\n\r\n # Get the extended address of the device:\r\n extended_address = SettingsBase.get_setting(self, \"extended_address\")\r\n\r\n # Create a callback specification for our device address, endpoint\r\n # Digi XBee profile and sample cluster id:\r\n xbdm_rx_event_spec = XBeeDeviceManagerRxEventSpec()\r\n xbdm_rx_event_spec.cb_set(self.sample_indication)\r\n xbdm_rx_event_spec.match_spec_set(\r\n (extended_address, 0xe8, 0xc105, 0x92),\r\n (True, True, True, True))\r\n self.__xbee_manager.xbee_device_event_spec_add(self,\r\n xbdm_rx_event_spec)\r\n\r\n # Create a callback specification that calls back this driver when\r\n # our device has left the configuring state and has transitioned\r\n # to the running state:\r\n xbdm_running_event_spec = XBeeDeviceManagerRunningEventSpec()\r\n xbdm_running_event_spec.cb_set(self.running_indication)\r\n self.__xbee_manager.xbee_device_event_spec_add(self,\r\n xbdm_running_event_spec)\r\n\r\n # Create a DDO configuration block for this device:\r\n xbee_ddo_cfg = XBeeConfigBlockDDO(extended_address)\r\n\r\n # Get the gateway's extended address:\r\n gw_xbee_sh, gw_xbee_sl = gw_extended_address_tuple()\r\n\r\n # Set the destination for I/O samples to be the gateway:\r\n xbee_ddo_cfg.add_parameter('DH', gw_xbee_sh)\r\n xbee_ddo_cfg.add_parameter('DL', gw_xbee_sl)\r\n\r\n # TODO: Configure the XBee pins to be Digital/Analog IO\r\n #\r\n # I.E.: Configure pins DI0 .. DI3 for digital input and \r\n # enable line monitoring on pins DIO0 .. DIO3:\r\n #for io_pin in [ 'D0', 'D1', 'D2', 'D3' ]:\r\n # xbee_ddo_cfg.add_parameter(io_pin, 3)\r\n # Enable I/O line monitoring on pins DIO0 .. DIO3:\r\n #xbee_ddo_cfg.add_parameter('IC', 0xf)\r\n #\r\n # I.E.: Configure pins DI1 .. 
DI3 for analog input:\r\n #for io_pin in [ 'D1', 'D2', 'D3' ]:\r\n # xbee_ddo_cfg.add_parameter(io_pin, 2)\r\n\r\n # Configure node sleep behavior:\r\n sleep_ms = SettingsBase.get_setting(self, \"sleep_ms\")\r\n awake_time_ms = SettingsBase.get_setting(self, \"awake_time_ms\")\r\n xbee_sleep_cfg = XBeeConfigBlockSleep(extended_address)\r\n if sleep_ms > 0:\r\n # Configure node to sleep for the specified interval:\r\n xbee_sleep_cfg.sleep_cycle_set(awake_time_ms, sleep_ms)\r\n else:\r\n # If sleep_ms is 0, disable sleeping on the node altogether:\r\n xbee_sleep_cfg.sleep_mode_set(SM_DISABLED)\r\n\r\n # Register the Sleep configuration block with the XBee Device Manager:\r\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_sleep_cfg)\r\n\r\n # Register the DDO configuration block with the XBee Device Manager:\r\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_ddo_cfg)\r\n\r\n # Indicate that we have no more configuration to add:\r\n self.__xbee_manager.xbee_device_configure(self)\r\n\r\n # Start the thread\r\n threading.Thread.start(self)\r\n\n return True", "def _init_dbus(self):\n self.players = [ 'amarokapp','amarok','rhythmbox','audacious','banshee',\n 'exaile','gmusicbrowser','juk','quodlibet','listen','songbird',\n 'muine','beep-media-play','mpd' ]\n try:\n self.bus=dbus.SessionBus()\n except ImportError:\n self.display_message(\"Some issues python-dbus\")", "def setup():\n global zb\n # Signal handler (Ctrl+C exit)\n signal.signal(signal.SIGINT, signal_handler) \n # DBus\n session_bus = dbus.SessionBus()\n objXBZB = session_bus.get_object(PROTOCOL_BUS_NAME, PROTOCOL_OBJ_PATH + \"/\" + XBEE_ZB + \"/\" + SOCKET0)\n zb = dbus.Interface(objXBZB, dbus_interface=PROTOCOL_BUS_NAME)", "def start(self):\n # Fetch the XBee Manager name from the Settings Manager:\n xbee_manager_name = SettingsBase.get_setting(self, \"xbee_device_manager\")\n dm = self.__core.get_service(\"device_driver_manager\")\n self.__xbee_manager = dm.instance_get(xbee_manager_name)\n\n # Register ourselves with the XBee Device Manager instance:\n self.__xbee_manager.xbee_device_register(self)\n\n # Get the extended address of the device:\n extended_address = SettingsBase.get_setting(self, \"extended_address\")\n\n # Create a callback specification for our device address, endpoint\n # Digi XBee profile and sample cluster id:\n xbdm_rx_event_spec = XBeeDeviceManagerRxEventSpec()\n xbdm_rx_event_spec.cb_set(self._sample_indication)\n xbdm_rx_event_spec.match_spec_set(\n (extended_address, 0xe8, 0xc105, 0x92),\n (True, True, True, True))\n self.__xbee_manager.xbee_device_event_spec_add(self,\n xbdm_rx_event_spec)\n\n # Create a DDO configuration block for this device:\n xbee_ddo_cfg = XBeeConfigBlockDDO(extended_address)\n\n # Get the gateway's extended address:\n gw_xbee_sh, gw_xbee_sl = gw_extended_address_tuple()\n\n # Set the destination for I/O samples to be the gateway:\n xbee_ddo_cfg.add_parameter('DH', gw_xbee_sh)\n xbee_ddo_cfg.add_parameter('DL', gw_xbee_sl)\n \n #\"\"\" IF YOUR XBEE DEVICE DON'N SLEEP AND YOU SEND DATA FROM XBEE DEVICE TO ConnectPort X manually then uncoment the start of that line.\n # Configure the IO Sample Rate:\n # Clip sample_rate_ms to the max value of IR:\n sample_rate_ms = SettingsBase.get_setting(self, \"sample_rate_ms\")\n sample_rate_ms = min(sample_rate_ms, 0xffff)\n xbee_ddo_cfg.add_parameter('IR', sample_rate_ms)\n\n # Register this configuration block with the XBee Device Manager:\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_ddo_cfg)\n\n # 
Setup the sleep parameters on this device:\n will_sleep = SettingsBase.get_setting(self, \"sleep\")\n sample_predelay = SettingsBase.get_setting(self, \"sample_predelay\")\n awake_time_ms = (SettingsBase.get_setting(self, \"awake_time_ms\") +\n sample_predelay)\n \n if will_sleep:\n # Sample time pre-delay, allow the circuitry to power up and\n # settle before we allow the XBee to send us a sample: \n xbee_ddo_wh_block = XBeeConfigBlockDDO(extended_address)\n xbee_ddo_wh_block.apply_only_to_modules((MOD_XB_ZB, MOD_XB_S2C_ZB,))\n xbee_ddo_wh_block.add_parameter('WH', sample_predelay)\n self.__xbee_manager.xbee_device_config_block_add(self,\n xbee_ddo_wh_block)\n\n # The original sample rate is used as the sleep rate:\n sleep_rate_ms = SettingsBase.get_setting(self, \"sample_rate_ms\")\n xbee_sleep_cfg = XBeeConfigBlockSleep(extended_address)\n if will_sleep:\n xbee_sleep_cfg.sleep_cycle_set(awake_time_ms, sleep_rate_ms)\n else:\n xbee_sleep_cfg.sleep_mode_set(SM_DISABLED)\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_sleep_cfg)\n #\"\"\"\n # Register this configuration block with the XBee Device Manager:\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_ddo_cfg)\n\n # Indicate that we have no more configuration to add:\n self.__xbee_manager.xbee_device_configure(self)\n \n #threading.Thread.start(self)\n \n return True", "def __init__(self, name=None, address=None):\n self.name = name\n self.address = address\n self.dongle = adapter.Adapter(adapter.list_adapters()[0])\n if not self.dongle.powered:\n self.dongle.powered = True\n logger.debug('Adapter powered')\n logger.debug('Start discovery')\n self.dongle.nearby_discovery()\n device_path = None\n if name is not None:\n device_path = tools.get_dbus_path(\n constants.DEVICE_INTERFACE,\n 'Name',\n name)\n elif address is not None:\n device_path = tools.get_dbus_path(\n constants.DEVICE_INTERFACE,\n 'Address',\n address)\n\n self.blnkt = device.Device(device_path[0])\n\n self.blinkt_srv_path = None\n self.blinkt_chrc_path = None", "def __init__(self, dev):\n\n self.dev = dev\n\n # do pygatt communication in the background\n self.gatt = PyGattThread(dev)\n self.gatt.start()", "def connect(self):\n self.blnkt.connect()\n while not self.blnkt.services_resolved:\n sleep(0.5)\n self._get_dbus_paths()", "async def init(self):\n logger.info(\"Init device: %s\", self._serial)\n self._callback(STATUS_INIT)\n\n self._init_binaries()\n self._init_apks()\n await self._init_forwards()\n\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --stop\")\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --nouia -d\")", "def connect_to_dbus(self):\n if not self._connected_to_dbus:\n self._connected_to_dbus = True\n proxy_obj = self._bus.get_object(\"org.wicd.daemon\", \n '/org/wicd/daemon')\n self.proxy_obj = proxy_obj\n daemon = dbus.Interface(proxy_obj, 'org.wicd.daemon')\n interface = dbus.Interface(proxy_obj, 'org.wicd.daemon.interface')\n ui = dbus.Interface(proxy_obj, 'org.wicd.daemon.ui')\n self._dbus_ifaces = {\"daemon\" : daemon,\n \"interface\" : interface, \n \"ui\" : ui}", "def __init__(self, ifname):\n\n self._dbus_loop = gobject.MainLoop()\n self._bus = dbus.SystemBus()\n wait_bus_owner_timeout = 5 # Wait for 5s to have an owner for the bus name we are expecting\n logger.debug('Going to wait for an owner on bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n while not self._bus.name_has_owner(RemoteDhcpClientControl.DBUS_NAME):\n time.sleep(0.2)\n wait_bus_owner_timeout -= 0.2\n if 
wait_bus_owner_timeout <= 0: # We timeout without having an owner for the expected bus name\n raise Exception('No owner found for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n \n logger.debug('Got an owner for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n gobject.threads_init() # Allow the mainloop to run as an independent thread\n dbus.mainloop.glib.threads_init()\n \n dbus_object_name = RemoteDhcpClientControl.DBUS_OBJECT_ROOT + '/' + str(ifname)\n logger.debug('Going to communicate with object ' + dbus_object_name)\n self._dhcp_client_proxy = self._bus.get_object(RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE, dbus_object_name)\n self._dbus_iface = dbus.Interface(self._dhcp_client_proxy, RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE)\n \n logger.debug(\"Connected to D-Bus\")\n self._dhcp_client_proxy.connect_to_signal(\"IpConfigApplied\",\n self._handleIpConfigApplied,\n dbus_interface = RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Handle the IpConfigApplied signal\n \n self._dhcp_client_proxy.connect_to_signal(\"LeaseLost\",\n self._handleLeaseLost,\n dbus_interface = RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Handle the IpConfigApplied signal\n \n #Lionel: the following line is used for D-Bus debugging only\n #self._bus.add_signal_receiver(catchall_signal_handler, interface_keyword='dbus_interface', member_keyword='member')\n self._dbus_loop_thread = threading.Thread(target = self._loopHandleDbus) # Start handling D-Bus messages in a background thread\n self._dbus_loop_thread.setDaemon(True) # D-Bus loop should be forced to terminate when main program exits\n self._dbus_loop_thread.start()\n \n self._bus.watch_name_owner(RemoteDhcpClientControl.DBUS_NAME, self._handleBusOwnerChanged) # Install a callback to run when the bus owner changes\n \n self._callback_new_lease_mutex = threading.Lock() # This mutex protects writes to the _callback_new_lease attribute\n self._callback_new_lease = None\n \n self._exit_unlock_event = threading.Event() # Create a new threading event that will allow the exit() method to wait for the child to terminate properly\n self._getversion_unlock_event = threading.Event() # Create a new threading event that will allow the GetVersion() D-Bus call below to execute within a timed limit \n\n self.status = DhcpLeaseStatus.DhcpLeaseStatus()\n\n self._getversion_unlock_event.clear()\n self._remote_version = ''\n self._dbus_iface.GetVersion(reply_handler = self._getVersionUnlock, error_handler = self._getVersionError)\n if not self._getversion_unlock_event.wait(10): # We give 10s for slave to answer the GetVersion() request\n logfile = tempfile.NamedTemporaryFile(prefix='TimeoutOnGetVersion-', suffix='.log', delete=False)\n if logfile:\n print('Saving TimeoutOnGetVersion environment dump to file \"' + logfile.name + '\"', file=sys.stderr)\n print('TimeoutOnGetVersion', file=logfile)\n subprocess.call('ps -ef', stdout=logfile, shell=True)\n subprocess.call('perl ./dbus-introspect.pl --system com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary /com/legrandelectric/RobotFrameworkIPC/DhcpClientLibrary/eth1', stdout=logfile, shell=True)\n subprocess.call('dbus-send --system --type=method_call --print-reply --dest=com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary /com/legrandelectric/RobotFrameworkIPC/DhcpClientLibrary/eth1 com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary.GetVersion', stdout=logfile, shell=True)\n logfile.close()\n raise 
Exception('TimeoutOnGetVersion')\n else:\n logger.debug('Slave version: ' + self._remote_version)", "def connectAdapter(self):\n self.canusb = pycanusb.CanUSB(bitrate='500')\n print('CanUSB: ',self.canusb)\n Msg = Switch_to_Operational_State_Msg()\n QTimer.singleShot(50,lambda msg = Msg : self.initialization(Msg))", "def __init__(self, interface_watcher, conn, dbus_object_path = DBUS_OBJECT_ROOT, **kwargs):\n # Note: **kwargs is here to make this contructor more generic (it will however force args to be named, but this is anyway good practice) and is a step towards efficient mutliple-inheritance with Python new-style-classes\n dbus.service.Object.__init__(self, conn=conn, object_path=dbus_object_path)\n self.interface_watcher = interface_watcher\n interface_watcher.interface_destroy_callback = self.InterfaceRemoved\t# Request interface_watcher object to call InterfaceRemoved (in order to send a D-Bus signal when secondary network interface is going down)\n interface_watcher.interface_add_callback = self.InterfaceAdded\t# Request interface_watcher object to call InterfaceAdded (in order to send a D-Bus signal when secondary network interface is going up)\n logger.debug('Registered binding with D-Bus object PATH: ' + str(dbus_object_path))", "def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))", "def run(self):\n\n # self.peripheral.connect(self.address)\n\n # //-set the delegate to handle notification message process\n # self.peripheral.setDelegate(MyDelegate(self.sinOut))\n if self._type == \"BW\":\n uuid = \"0000fff0-0000-1000-8000-00805f9b34fb\" # the bought module distinguished by the name.\n # BW means the bought module's name \"BW-ECG-01\".\n svc = self.peripheral.getServiceByUUID(uuid)\n\n # //-the characteristic that data can be written to\n chr_of_writable = svc.getCharacteristics()[0]\n # //-the characteristic that receives notification from other peripheral.\n chr_of_notify = svc.getCharacteristics()[1]\n # //-enable the notify\n self.peripheral.writeCharacteristic(chr_of_notify.valHandle + 1, struct.pack('<bb', 0x01, 0x00), True)\n # //-bind user ID to BW-ECG-01, the ID could be a random ID.\n chr_of_writable.write(b'\\xE8\\x41\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00',\n True)\n # //-start the acquiring, a time(Y/M/D/H/H/S/deltaT) should be given. the time could be a random time\n # //-but the delta T should have meaning which is the acquiring time. 
0x01 means 1 minutes.\n # //-the delta T could be modified as other number, this could be done by UI.\n # //-if the number could be set by user, that will be perfection.\n chr_of_writable.write(b'\\xE8\\x23\\x15\\x03\\x0b\\x10\\x15\\x00\\x00\\x01', True)\n # //-start continually acquiring\n chr_of_writable.write(b'\\xE8\\20', True)\n\n while self.working:\n if self.peripheral.waitForNotifications(1.0):\n # print(\"notification:\")\n continue\n else:\n uuid = \"f000fff0-0451-4000-b000-000000000000\" # the module made by ourselves\n svc = self.peripheral.getServiceByUUID(uuid)\n ch = svc.getCharacteristics()[0]\n self.peripheral.writeCharacteristic(ch.valHandle + 1, struct.pack('<bb', 0x01, 0x00))\n # print(\"waiting...\")\n # self.sinOut.emit(\"waiting...\")\n\n while self.working:\n if self.peripheral.waitForNotifications(1.0):\n # print(\"notification:\")\n continue", "async def async_setup(self) -> None:\n await self.hass.async_add_executor_job(self._setup)\n\n # set already known devices to away instead of unavailable\n device_registry = dr.async_get(self.hass)\n devices = dr.async_entries_for_config_entry(device_registry, self.entry_id)\n for device_entry in devices:\n if device_entry.via_device_id is None:\n continue # do not add the router itself\n\n device_mac = dict(device_entry.connections).get(dr.CONNECTION_NETWORK_MAC)\n self.devices[device_mac] = {\n \"mac\": device_mac,\n \"name\": device_entry.name,\n \"active\": False,\n \"last_seen\": dt_util.utcnow() - timedelta(days=365),\n \"device_model\": None,\n \"device_type\": None,\n \"type\": None,\n \"link_rate\": None,\n \"signal\": None,\n \"ip\": None,\n }\n\n await self.async_update_device_trackers()\n self.entry.async_on_unload(\n async_track_time_interval(\n self.hass, self.async_update_device_trackers, SCAN_INTERVAL\n )\n )\n\n async_dispatcher_send(self.hass, self.signal_device_new)", "def onConfigureMessage(self, config):\n for adaptor in config[\"adaptors\"]:\n adtID = adaptor[\"id\"]\n if adtID not in self.devices:\n # Because configure may be re-called if devices are added\n name = adaptor[\"name\"]\n friendly_name = adaptor[\"friendly_name\"]\n logging.debug(\"%s Configure app. 
Adaptor name: %s\", ModuleName, name)\n self.idToName[adtID] = friendly_name.replace(\" \", \"_\")\n self.devices.append(adtID)\n self.dm = DataManager(self.bridge_id)\n self.setState(\"starting\")", "def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))", "def do_dbus_register(self, connection, object_path):\n logger.debug('::dbus_register')\n Gio.Application.do_dbus_register(self, connection, object_path)\n failure = False\n try:\n connection.connect('closed', lambda i: self.quit())\n self._dbus_id = connection.register_object(\n object_path,\n DeskChangerDaemonDBusInterface.interfaces[0],\n self._handle_dbus_call,\n self._handle_dbus_get,\n self._handle_dbus_set\n )\n except TypeError:\n # TODO - Handle this failure correctly.\n failure = True\n except GLib.Error as e:\n logger.debug(e.args)\n finally:\n if self._dbus_id is None or self._dbus_id == 0:\n logger.critical('failed to register DBus name %s', object_path)\n if failure:\n logger.error('possibly unsupported version of glib')\n return False\n\n logger.info('successfully registered DBus name %s', object_path)\n return True", "def __init__(self,device=None,port=0):\n self.device= Service.initDevice(device)\n self.adbCmd= r'adb -s %s '%self.device\n self.port = port\n if self.port == 0:\n self.port = utils.free_port()", "def init(self):\n logger.info('systime service init')\n yield self._connect_dbus()", "def device_connect(self):\n pass", "def init():\n\n global registry, fsk_router, ook_router\n\n radio.init()\n OpenThings.init(Devices.CRYPT_PID)\n\n fsk_router = Registry.Router(\"fsk\")\n\n #OOK receive not yet written\n #It will be used to be able to learn codes from Energenie legacy hand remotes\n ##ook_router = Registry.Router(\"ook\")\n\n registry = Registry.DeviceRegistry()\n registry.set_fsk_router(fsk_router)\n ##registry.set_ook_router(ook_router\n\n path = os.path.join(sys.path[0], registry.DEFAULT_FILENAME)\n if os.path.isfile(path):\n registry.load_from(path)\n print(\"loaded registry from file\")\n registry.list()\n fsk_router.list()\n\n # Default discovery mode, unless changed by app\n ##discovery_none()\n ##discovery_auto()\n ##discovery_ask(ask)\n discovery_autojoin()\n ##discovery_askjoin(ask)", "def __action_connect_system_bus_cb(self, action, parameter):\n try:\n if self.system_bus is not None:\n return\n bw = BusWatch(self.data_dir, Gio.BusType.SYSTEM)\n self.system_bus = bw.box_bus\n self.stack.add_titled(self.system_bus, 'System Bus', 'System Bus')\n self.remove_action('connect-system-bus')\n except Exception as e:\n print(e)", "def Init(self, factory_reset=True):\n # Create a new serial device every time since the serial driver\n # on chameleon board is not very stable.\n result = self.CreateSerialDevice()\n\n if factory_reset:\n # Enter command mode to issue commands.\n # This must happen first, so that other commands work\n result = self.EnterCommandMode() and result\n\n # Do a factory reset to make sure it is in a known initial state.\n # Do the factory reset before proceeding to set parameters below.\n result = self.FactoryReset() and result\n\n # Set HID as the service profile.\n result = self.SetServiceProfileHID() and result\n\n # Set the HID device type.\n result = self.SetHIDType(self.device_type) and result\n\n # Set the default class of service.\n result = 
self.SetDefaultClassOfService() and result\n\n # Set the class of device (CoD) according to the hid device type.\n result = self.SetClassOfDevice(self.device_type) and result\n\n # Set authentication to the specified mode.\n if self.authentication_mode != PeripheralKit.OPEN_MODE:\n result = self.SetAuthenticationMode(self.authentication_mode)\\\n and result\n\n # Set RN-42 to work as a slave.\n result = self.SetSlaveMode() and result\n\n # Set a temporary pin code for testing purpose.\n # Only do this when we want to use a pin code.\n if self.authentication_mode == PeripheralKit.PIN_CODE_MODE:\n result = self.SetPinCode(self.TMP_PIN_CODE) and result\n\n # Enable the connection status message so that we could get the message\n # of connection/disconnection status.\n result = self.EnableConnectionStatusMessage() and result\n\n if not isinstance(self._kit, nRF52):\n # Reboot so that the configurations above take effect.\n result = self.Reboot() and result\n\n # Enter command mode again after reboot.\n result = self.EnterCommandMode() and result\n time.sleep(self.INIT_SLEEP_SECS)\n\n logging.info('A bluetooth HID \"%s\" device is connected.', self.device_type)\n return result", "def init(self):\n try:\n yield self._connect_dbus()\n logger.info(\"Request the GSM resource\")\n yield WaitFSOResource('GSM', time_out=30)\n yield WaitDBus(self.ousage.RequestResource, 'GSM')\n yield self._turn_on()\n logger.info(\"register on the network\")\n register = yield self._register()\n #if register:\n #provider = yield tichy.Wait(self, 'provider-modified')\n \n self._keep_alive().start()\n \n ##network selection end\n \n except Exception, ex:\n logger.error(\"Error : %s\", ex)\n raise\n \n try:\n \n yield tichy.Service.get('ConfigService').wait_initialized()\n self.config_service = tichy.Service.get(\"ConfigService\")\n logger.info(\"got config service\")\n \n except Exception, ex:\n logger.error(\"Error in try retrieving config service : %s\", ex)\n \n try:\n \n ##call forwaring setting start\n self.values = self.config_service.get_items(\"call_forwarding\")\n if self.values != None: self.values = dict(self.values)\n logger.info(\"realized values is none\")\n\n except Exception, ex:\n logger.error(\"Error in try call forwarding setting : %s\", ex)\n \n \n try:\n\n self.SettingReason = tichy.settings.ListSetting('Call Forwarding','Reason',tichy.Text,value='unconditional', setter=self.ForwardingSetReason,options=[\"unconditional\",\"mobile busy\",\"no reply\",\"not reachable\",\"all\",\"allconditional\"],model=tichy.List([ ListSettingObject(\"unconditional\", self.action),ListSettingObject(\"mobile busy\",self.action),ListSettingObject(\"no reply\", self.action),ListSettingObject(\"not reachable\", self.action),ListSettingObject(\"all\", self.action),ListSettingObject(\"all conditional\", self.action)]), ListLabel =[('title','name')])\n \n self.SettingForwarding = tichy.settings.ToggleSetting('Call Forwarding', 'active', tichy.Text, value=self.GetForwardingStatus('unconditional'),setter=self.ToggleForwarding, options=['active','inactive'])\n \n \n except Exception, ex:\n logger.error(\"Error in try call forwarding setting list : %s\", ex)\n \n \n try:\n\n self.SettingChannels = tichy.settings.Setting('Call Forwarding', 'channels', tichy.Text, value=self.ForwardingGet('class'), setter=self.ForwardingSetClass, options=[\"voice\",\"data\",\"voice+data\",\"fax\",\"voice+data+fax\"])\n \n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Target Number', tichy.Text, 
value=self.ForwardingGet('number'), setter=self.ForwardingSetNumber)\n \n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Timeout', tichy.Text, value=self.ForwardingGet('timeout'), setter=self.ForwardingSetTimeout)\n \n ##call forwaring setting stop\n \n \n except Exception, ex:\n logger.error(\"Error in try Error in try call forwarding setting : %s\", ex)\n \n try:\n\n ##call identifaction setting start\n self.CallIdentification = tichy.settings.Setting('Network', 'Call Identification', tichy.Text, value=self.GetCallIdentification(), setter=self.SetCallIdentifaction, options=[\"on\",\"off\",\"network\"])\n ##call identifaction setting stop\n \n except Exception, ex:\n logger.error(\"Error in network identification setting: %s\", ex)\n \n try: \n ##network selection etc begin\n self.NetworkRegistration = tichy.settings.Setting('Network', 'Registration', tichy.Text, value=self.GetRegStatus(), setter=self.SetRegStatus, options=[\"registered\",\"not registered\"])\n \n \n except Exception, ex:\n logger.error(\"Error in network registration setting : %s\", ex)\n \n \n try:\n \n self.scanning = False\n self.NetworkList = tichy.List()\n self.ListLabel = [('title','name'),('subtitle','status')]\n \n self.scan_setting = tichy.settings.ListSetting('Network', 'List', tichy.Text, value=\"scan\", setter=self.run_scan, options=['scan'], model=self.NetworkList, ListLabel=self.ListLabel)\n \n except Exception, ex:\n logger.error(\"Error in network list setting : %s\", ex)\n #raise", "def create_bond(device_address=None, adapter_address=None):\n con = pexpect.spawn('sudo bluetoothctl')\n con.expect(\"bluetooth\", timeout=1)\n \n print(\"selecting adapter ...\")\n con.sendline(\"select \" + adapter_address.upper())\n\n #check to see if already paired\n print(\"checking if bond exists already ...\")\n no_bond=False\n try:\n con.sendline(\"paired-devices\")\n con.expect(device_address.upper(), timeout=1)\n except(pexpect.TIMEOUT):\n no_bond = True\n else:\n print(\"bond already exists for %s\" % (device_address.upper()))\n print(\"successfully quiting bluetoothctl since bond is already formed\")\n con.sendline(\"quit\") \n return(0) \n \n con.sendline(\"select \" + adapter_address.upper())\n \n print(\"registering agent ...\")\n try:\n con.sendline(\"agent NoInputNoOutput\")\n con.expect(['Agent registered', 'Agent is already registered'], timeout=1)\n con.sendline(\"default-agent\")\n con.expect(\"Default agent request successful\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"unable to register agent\")\n return(1)\n\n print(\"enabling pairing ...\")\n try:\n con.sendline(\"pairable on\")\n con.expect(\"Changing pairable on succeeded\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"unable to turn pairing on\")\n return(1)\n\n print(\"starting scan ...\")\n try:\n con.sendline(\"scan on\")\n devfound = con.expect(device_address.upper(), timeout=5)\n if devfound == 0:\n try:\n con.sendline(\"scan off\")\n print (\"Found device. 
connecting to %s\" % (device_address.upper()))\n con.sendline(\"connect \" + device_address.upper())\n con.expect(\"Connection successful\", timeout=10)\n #sleep(10) #need extra time here to finish pairing\n except(pexpect.TIMEOUT):\n print(\"could not connect to %s\" % (device_address.upper()))\n return(1)\n try:\n #explicitly pair with the device\n con.sendline(\"pair \" + device_address.upper())\n con.expect(\"Pairing successful\", timeout=5)\n except(pexpect.TIMEOUT):\n print(\"pairing not successful\")\n try:\n con.sendline(\"info \" + device_address.upper()) \n con.expect(\"Paired: yes\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"could not pair with %s\" % (device_address.upper()))\n return(1)\n else:\n con.sendline(\"trust \" + device_address.upper())\n print(\"Connection and pairing successful!\")\n #try:\n #con.sendline(\"list-attributes\")\n #con.expect(\"6e400003-b5a3-f393-e0a9-e50e24dcca9e\", timeout=2)\n #print(con.before)\n #for line in con.before:\n # read_characteristics = line\n #print(read_characteristics)\n #except(pexpect.TIMEOUT):\n #print(\"could not list the attributes\")\n #return(1)\n try:\n print(\"disconnecting temporarily ...\")\n con.sendline(\"disconnect \" + device_address.upper())\n con.expect(\"Connected: no\", timeout=5)\n except(pexpect.TIMEOUT):\n print(\"could not disconnect.. \")\n con.sendline(\"quit\")\n return(1)\n else:\n print(\"successfully quiting bluetoothctl after forming bond\")\n con.sendline(\"quit\")\n return(0)\n except(pexpect.TIMEOUT):\n con.sendline(\"scan off\")\n print(\"unable to find device %s\" % (device_address))\n return(1)", "def init_bluetooth(self, btport = 4):\n self.server_sock = BluetoothSocket( RFCOMM )\n self.server_sock.bind((\"\", btport))\n self.server_sock.listen(1)\n port = self.server_sock.getsockname()[1]\n uuid = \"94f39d29-7d6d-437d-973b-fba39e49d4ee\"\n advertise_service(self.server_sock, \"SampleServer\",\n service_id=uuid,\n service_classes=[uuid, SERIAL_PORT_CLASS],\n profiles=[SERIAL_PORT_PROFILE],\n )\n import atexit\n atexit.register(goodbye, None, self.server_sock)\n #print \"atexit registered 1\"\n print_msg(self.name, \"waiting for connection on RFCOMM channel %d\" % port)\n self.client_sock, client_info = self.server_sock.accept() # blocking\n atexit.register(goodbye, self.client_sock, self.server_sock)\n #print \"atexit registered 2\"\n print_msg(self.name, \"Accepted connection from \"+str(client_info))\n self.is_connected = True", "def broadcast(loopstate):\n cmdstring = 'sudo hcitool -i hci0 cmd ' # Send cmd to hci0\n cmdstring += '0x08 ' # Set group to BLE\n cmdstring += '0x0008 ' # Set command to HCI_LE_Set_Advertising_Data\n cmdstring += '0D ' # Length of entire following data, in bytes\n cmdstring += '02 ' # Length of flag info\n cmdstring += '01 ' # Use AD flags\n cmdstring += '02 ' # Flag value:\n # bit 0 (OFF) LE Limited Discoverable Mode\n # bit 1 (ON) LE General Discoverable Mode\n # bit 2 (OFF) BR/EDR Not Supported\n # bit 3 (ON) Simultaneous LE and BR/EDR to Same Device Capable (controller)\n # bit 4 (ON) Simultaneous LE and BR/EDR to Same Device Capable (Host)\n cmdstring += '09 ' # Length of following message, in bytes\n cmdstring += '07 ' # GAP value (07 = 128 Bit Complete Service UUID List)\n cmdstring += '42 69 63 79 63 6c 65 ' # Header to identify beacon message-\n # - and it's also is Bicycle in ASCII!\n if loopstate:\n cmdstring = cmdstring + LOOP_ON\n else:\n cmdstring = cmdstring + LOOP_OFF + ' >/dev/null 2>&1'\n subprocess.call(cmdstring, shell=True)\n 
subprocess.call('sudo hciconfig hci0 leadv 3 >/dev/null 2>&1', shell=True)", "def test_gwservice_createdevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n print(json.dumps(payload))\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device delete\", body=body)\n if resp.status_code != 200:\n assert False" ]
[ "0.7221227", "0.61894155", "0.6014742", "0.59280574", "0.5923623", "0.57182866", "0.57111996", "0.56961524", "0.56900334", "0.56577533", "0.5630222", "0.5583196", "0.5564716", "0.5501188", "0.54448515", "0.54448307", "0.5401305", "0.5356949", "0.5353707", "0.5304931", "0.5298031", "0.5284333", "0.5272044", "0.526911", "0.52525955", "0.525026", "0.52470005", "0.52267975", "0.5224401", "0.5193984" ]
0.77838093
0
Adds service to previously initialized app.
def add_service(self, service): self.app.add_service(service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addService(self, service):\n\t\tself.services.append(service)\n\t\treturn self", "def add(self, service: AbstractService):\n self.services.append(service)", "def initService(self):", "def add_app(self):\n \n pass", "def add_service(self, zeroconf, service_type, name):\n self.pending.add(\n asyncio.ensure_future(self._internal_add(zeroconf, service_type, name))\n )", "def add_service(torconfig, service, port=None):\n # picks a random port until it finds one avaible.\n while not service.tcp:\n port = port or new_port()\n try:\n service.tcp = reactor.listenTCP(port, service.factory)\n except error.CannotListenError:\n pass\n\n service.hs = txtorcon.HiddenService(\n torconfig, os.path.join(config.tor_data, service.name),\n ['%d 127.0.0.1:%d' % (service.port, port)])\n apaf.hiddenservices.append(service)", "def set_service(self):\n\n if self.service:\n self.service = self.service(\n json=self.json,\n google_user=self.google_user,\n endpoint=self\n )", "def _installed_apps_add(self):\n config.add_plugin(self.module_path)", "def service(self, service):\n \n self._service = service", "def _add_services(self):\n this_service = {'name': 'swift-proxy'}\n other_services = [\n {'name': 'percona-cluster'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'swift-storage'}\n ]\n super(SwiftProxyBasicDeployment, self)._add_services(this_service,\n other_services)", "def start_services(self, app_to_start):\n\n self.service_collection = service.IServiceCollection(app_to_start)\n\n amp_factory = AmpServerFactory(self)\n\n port = settings.SERVER_AMP_PORT\n amp_server = internet.TCPServer(port, amp_factory)\n amp_server.setName('dott%s' % port)\n amp_server.setServiceParent(self.service_collection)", "def register_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_register_service_description()\n self._register(description, \"registering agent's service on the SOEF.\")", "def addServices(self):\r\n self.addHendrix()\r\n\r\n if not self.options.get('global_cache') and not self.options.get('nocache'):\r\n self.addLocalCacheService()\r\n\r\n if self.is_secure:\r\n self.addSSLService()\r\n\r\n self.catalogServers(self.hendrix)", "def _add_services(self):\n this_service = {'name': 'keystone'}\n other_services = [\n {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'}, # satisfy wrkload stat\n {'name': 'cinder'},\n ]\n super(KeystoneBasicDeployment, self)._add_services(this_service,\n other_services)", "def addServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...", "def _registerService(self, callerId, service, serviceApi, callerApi):\n if service not in self.FilterServices:\n # The type of the service is not included in the XMLRPC call\n self.__docWriter.addService(callerId, service, \"TODO: type\")", "def add_preload_service(acc, service, chars=None, opt_chars=None):\n from pyhap.loader import get_serv_loader, get_char_loader\n service = get_serv_loader().get(service)\n if chars:\n chars = chars if isinstance(chars, list) else [chars]\n for char_name in chars:\n char = get_char_loader().get(char_name)\n service.add_characteristic(char)\n if opt_chars:\n opt_chars = opt_chars if isinstance(opt_chars, list) else [opt_chars]\n for opt_char_name in opt_chars:\n opt_char = get_char_loader().get(opt_char_name)\n service.add_opt_characteristic(opt_char)\n acc.add_service(service)\n return service", "def _add_services(self):\n # Services and relations which are present merely 
to satisfy\n # required_interfaces and workload status are not inspected.\n # Fix me. Inspect those too.\n this_service = {'name': 'neutron-openvswitch'}\n other_services = [\n {'name': 'nova-compute'},\n {'name': 'nova-cloud-controller'},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'neutron-api'},\n self.get_percona_service_entry(),\n ]\n if self._get_openstack_release() >= self.bionic_train:\n other_services.append({'name': 'placement'})\n super(NeutronOVSBasicDeployment, self)._add_services(this_service,\n other_services)", "def RegisterService():\n hooks.RegisterHook(SERVICE_NAME, 'file-exists', hook_class=HookForExists)\n hooks.RegisterHook(SERVICE_NAME, 'file-write',\n hook_class=HookForWriteAndTouch)\n hooks.RegisterHook(SERVICE_NAME, 'file-touch',\n hook_class=HookForWriteAndTouch)\n hooks.RegisterHook(SERVICE_NAME, 'file-get', hook_class=HookForGet)\n hooks.RegisterHook(SERVICE_NAME, 'list-files', hook_class=HookForListFiles)\n hooks.RegisterHook(SERVICE_NAME, 'list-dir', hook_class=HookForListDir)", "def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)", "def addService(self, interfaceClass: java.lang.Class, service: object) -> None:\n ...", "def register_service(service, iface, name):", "def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)", "async def on_terncy_svc_add(event):\n dev_id = event.data[\"dev_id\"]\n _LOGGER.info(\"found terncy service: %s %s\", dev_id, event.data)\n host = event.data[\"ip\"]\n if dev_id == tern.dev_id and not tern.is_connected():\n tern.host = host\n _LOGGER.info(\"start connection to %s %s\", dev_id, tern.host)\n\n hass.async_create_task(setup_terncy_loop())", "def service():\n conf = template('remote/addok.service', **config)\n put(conf, '/etc/systemd/system/addok.service')\n systemctl('enable addok.service')", "def register_service(self, name, command):\n service_name = command['service_name']\n try:\n service_type = self.get_interface_type(command['interface_type'], '.srv')\n self.srv_clients[service_name] = self.AsyncServiceProxy(\n self,\n service_name,\n service_type)\n\n if service_name in self.offline_services:\n self.offline_services.remove(service_name)\n except JoyTeleopException:\n if service_name not in self.offline_services:\n self.offline_services.append(service_name)", "def _init_service(self):\n self.robot_variables.check_variables()\n # setting launch id for report portal service\n self.robot_service.init_service(endpoint=self.robot_variables.endpoint,\n project=self.robot_variables.project,\n uuid=self.robot_variables.uuid)", "def add_service(self, service):\n # type: (LoadBalancerService) -> List[BoundAction]\n return self._client.add_service(self, service=service)", "def _register_service(self, extkey, extcls):\n if extkey not in self._service_registry:\n self._service_registry[extkey] = extcls\n else:\n self._service_registry[extkey] = extcls\n return", "def appMgr( *varg , **kwarg ) :\n import GaudiPython.Bindings\n _g = GaudiPython.Bindings.AppMgr()\n if not 'LoKiSvc' in _g.ExtSvc :\n logger.debug ('appMgr: add LoKiSvc into the list of services')\n _g.ExtSvc += [ 'LoKiSvc']\n return _g" ]
[ "0.70407766", "0.6967143", "0.6634693", "0.6463807", "0.6374438", "0.628673", "0.62649804", "0.6252654", "0.62386537", "0.62090975", "0.61053437", "0.6091435", "0.605257", "0.6042793", "0.60172504", "0.599833", "0.599623", "0.59924555", "0.5940163", "0.58883333", "0.5873045", "0.5859798", "0.5848243", "0.5843242", "0.5801694", "0.5795095", "0.5783658", "0.57812464", "0.57582015", "0.57482976" ]
0.8112667
0
Return the date (UTC) from 10 days ago formatted as YYYYMMDD.
def _ten_days_ago() -> str: ten_days_ago = gmtime(mktime(gmtime()) - TEN_DAYS_SECONDS) return strftime(DATE_FORMAT, ten_days_ago)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ago(self):\n return human(self.timestamp/1000.0, precision=1, abbreviate=True)", "def relativeTime(date):\n diff = datetime.utcnow() - date\n\n if diff.days > 7 or diff.days < 0:\n return date.ctime()\n elif diff.days == 1:\n return '1 day ago'\n elif diff.days > 1:\n return '%d days ago' % diff.days\n elif diff.seconds <= 1:\n return 'just now'\n elif diff.seconds < 60:\n return '%d seconds ago' % diff.seconds\n elif diff.seconds < (60 * 2):\n return '1 minute ago'\n elif diff.seconds < (60 * 60):\n return '%d minutes ago' % (diff.seconds / 60)\n elif diff.seconds < (60 * 60 * 2):\n return '1 hour ago'\n else:\n return '%d hours ago' % (diff.seconds / (60 * 60))", "def pretty_date(date: datetime):\n if not isinstance(date, datetime) or date > NOW:\n raise ValueError('pretty_date() only accepts datetime objects in the past')\n diff = NOW - date\n seconds = int(diff.total_seconds())\n minutes = seconds // 60\n hours = minutes // 60\n # This doesn't _feel_ very pythonic…\n if seconds < 10:\n return 'just now'\n if seconds < 60:\n return f'{seconds} seconds ago'\n if minutes < 2:\n return 'a minute ago'\n if minutes < 60:\n return f'{minutes} minutes ago'\n if hours < 2:\n return 'an hour ago'\n if hours < 24:\n return f'{hours} hours ago'\n if hours < 48:\n return 'yesterday'\n return date.strftime('%m/%d/%y')", "def human_date(self, date):\n return timeago.format(date)", "def render_delta_from_now(date):\n return render_delta(__timedelta_millis(date - utc()))", "def future_time():\n ten_days_after = datetime.now() + timedelta(days=10)\n time_format = '%Y-%m-%d %H:%M:%S'\n return ten_days_after.strftime(time_format)", "def create_past_date(self, days):\n past_date = datetime.now() - timedelta(days=days)\n return past_date.isoformat()", "def SAgeDdt(ddt):\n if ddt.days < 0:\n return \"in the future?\"\n months = int(ddt.days*12/365)\n years = int(ddt.days/365)\n if years >= 1:\n return \"%d year%s ago\" % (years, SPlural(years))\n if months >= 3:\n return \"%d months ago\" % months \n if ddt.days == 1:\n return \"yesterday\"\n if ddt.days > 1:\n return \"%d days ago\" % ddt.days\n hrs = int(ddt.seconds/60/60)\n if hrs >= 1:\n return \"%d hour%s ago\" % (hrs, SPlural(hrs))\n minutes = round(ddt.seconds/60)\n if minutes < 1:\n return \"seconds ago\"\n return \"%d minute%s ago\" % (minutes, SPlural(minutes))", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def relative_datetime(self):\n now = datetime.now(timezone.utc)\n created_at = self.created_at.astimezone(timezone.utc)\n\n delta = humanize.naturaldelta(abs(created_at - now))\n tense = \"from now\" if now < created_at else \"ago\"\n\n return f\"{delta} {tense}\"", "def get_n_days_ago(self, startdate, n):\n return startdate - datetime.timedelta(days=n)", "def prevDate(y, m, d):\n dateTuple = (y, m, d, 0, 0, 0, 0, 0, 0)\n epochSecs = mktime(dateTuple)\n prevDateTuple = localtime(epochSecs-24*60*60)\n return prevDateTuple[:3]", "def pretty_date(time=False):\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n else:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(round(second_diff, 
0))) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(round(second_diff / 60, 0))) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(round(second_diff / 3600, 0))) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(int(round(day_diff, 0))) + \" days ago\"\n if day_diff < 31:\n return str(int(round(day_diff / 7, 0))) + \" weeks ago\"\n if day_diff < 365:\n return str(int(round(day_diff / 30, 0))) + \" months ago\"\n return str(int(round(day_diff / 365, 0))) + \" years ago\"", "def thirty_days_ago():\n return date.today() - timedelta(days=30)", "def pretty_date(time=False):\r\n from datetime import datetime\r\n import dateutil.parser\r\n now = datetime.now()\r\n if type(time) is str or type(time) is unicode:\r\n time = dateutil.parser.parse(time)\r\n if type(time) is int:\r\n diff = now - datetime.fromtimestamp(time)\r\n elif isinstance(time, datetime):\r\n diff = now - time\r\n elif not time:\r\n diff = now - now\r\n second_diff = diff.seconds\r\n day_diff = diff.days\r\n\r\n if day_diff < 0:\r\n return ''\r\n\r\n if day_diff == 0:\r\n if second_diff < 10:\r\n return \"just now\"\r\n if second_diff < 60:\r\n return str(second_diff) + \" seconds ago\"\r\n if second_diff < 120:\r\n return \"a minute ago\"\r\n if second_diff < 3600:\r\n return ' '.join([str(second_diff / 60), \"minutes ago\"])\r\n if second_diff < 7200:\r\n return \"an hour ago\"\r\n if second_diff < 86400:\r\n return ' '.join([str(second_diff / 3600), \"hours ago\"])\r\n if day_diff == 1:\r\n return \"Yesterday\"\r\n if day_diff < 7:\r\n return ' '.join([str(day_diff), \"days ago\"])\r\n if day_diff < 31:\r\n return ' '.join([str(day_diff / 7), \"weeks ago\"])\r\n if day_diff < 60:\r\n return ' '.join([str(day_diff / 30), \"month ago\"])\r\n if day_diff < 365:\r\n return ' '.join([str(day_diff / 30), \"months ago\"])\r\n if day_diff < (365 * 2):\r\n return ' '.join([str(day_diff / 365), \"year ago\"])\r\n return ' '.join([str(day_diff / 365), \"years ago\"])", "def day_relative_to_absolute(relative):\n today = datetime.datetime.today()\n delta = datetime.timedelta(days=relative)\n return (today - delta).strftime(\"%Y-%m-%d\")", "def render_date_time_with_relative_into(into, date_time, add_ago):\n into.append(format(date_time, DATETIME_FORMAT_CODE))\n \n into.append(' [*')\n into.append(elapsed_time(date_time))\n if add_ago:\n into.append(' ago')\n into.append('*]')\n \n return into", "def get_preceeding_dekad(c):\n if c.day < 10:\n prec_dekad = dt.date(c.year, c.month, 1) - dt.timedelta(days=1)\n elif c.day < 20:\n prec_dekad = dt.date(c.year, c.month, 10)\n else:\n prec_dekad = dt.date(c.year, c.month, 20)\n\n return prec_dekad", "def yesterday_string(fmt='%Y-%m-%d'):\n return (brasilia_time() - pd.Timedelta(days=1)).strftime(fmt)", "def pretty_date(time=False):\n from datetime import datetime\n\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return \"\"\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(second_diff / 60) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour 
ago\"\n if second_diff < 86400:\n return str(second_diff / 3600) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff / 7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff / 30) + \" months ago\"\n return str(day_diff / 365) + \" years ago\"", "def pretty_date(time=False):\n from datetime import datetime\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time \n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"", "def timesince_limited(d):\n today = datetime.datetime.now()\n delta = datetime.timedelta\n interval = today - d\n if today.strftime('%Y-%m-%d') == d.strftime('%Y-%m-%d'):\n if interval < delta(days=0, hours=1):\n return timesince(d) + ' ago '\n else:\n return d.strftime('%H:%M')\n else:\n return d", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def getfuturedate(runningdate, futuredays):\n d = (runningdate + datetime.timedelta(days=(futuredays-1))).strftime('%d-%m')\n return str(d)", "def timesince(date):\n format = '%b %d, %Y'\n return date.strftime(format)", "def format_date(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d')", "def timesince(dt, default=\"just now\"):\n\n now = datetime.datetime.now()\n diff = now - dt\n \n periods = (\n (diff.days / 365, \"year\", \"years\"),\n (diff.days / 30, \"month\", \"months\"),\n (diff.days / 7, \"week\", \"weeks\"),\n (diff.days, \"day\", \"days\"),\n (diff.seconds / 3600, \"hour\", \"hours\"),\n (diff.seconds / 60, \"minute\", \"minutes\"),\n (diff.seconds, \"second\", \"seconds\"),\n )\n\n for period, singular, plural in periods:\n \n if period:\n return \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n return default", "def time_since(dt, default=\"just now\"):\n\t\n\tnow = datetime.utcnow()\n\tdiff = now - dt\n\t\n\tperiods = (\n\t\t(diff.days / 365, \"year\", \"years\"),\n\t\t(diff.days / 30, \"month\", \"months\"),\n\t\t(diff.days / 7, \"week\", \"weeks\"),\n\t\t(diff.days, \"day\", \"days\"),\n\t\t(diff.seconds / 3600, \"hour\", \"hours\"),\n\t\t(diff.seconds / 60, \"minute\", \"minutes\"),\n\t\t(diff.seconds, \"second\", \"seconds\"),\n\t)\n\n\tfor period, singular, plural in periods:\n\t\tif period:\n\t\t\treturn \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n\treturn default", "def yesterdayDate(self):\n yesterday = time.time() - 24*3600\n return time.strftime(\"%m/%d/%Y\", time.localtime(yesterday))", "def get_days_old(days):\n days = int(days)\n current_time = datetime.datetime.today()\n days_after = datetime.timedelta(days)\n new_date = current_time - days_after\n new_date = 
new_date.strftime(\"%d-%b-%Y\")\n return new_date" ]
[ "0.63436246", "0.6039876", "0.5983086", "0.5962784", "0.58728963", "0.581312", "0.58069855", "0.5791033", "0.5787348", "0.5763219", "0.56588507", "0.5579537", "0.5577654", "0.55374175", "0.5471164", "0.539841", "0.53682923", "0.5352607", "0.53389454", "0.53330094", "0.5331382", "0.5319435", "0.52447456", "0.5240271", "0.5236799", "0.5219827", "0.5197637", "0.5193551", "0.5193048", "0.5184128" ]
0.7581562
0
Return the last month (UTC) formatted as YYYYMM.
def _last_month() -> str: time_now = gmtime() return ( f"{time_now.tm_year}-{time_now.tm_mon - 1:02d}" if time_now.tm_mon > 1 else f"{time_now.tm_year - 1}-12" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_month_day():\r\n return (datetime.now().replace(day=1) + relativedelta(months=1) + timedelta(days=-1)).strftime(\r\n '%d-%m-%Y')", "def make_last_month_period(dt=None):\n if not dt:\n dt = datetime.utcnow()\n dt = dt.replace(day=1) - timedelta(days=1)\n return dt.strftime('%Y%m')", "def last_month():\n return datetime.now() + relativedelta(months=-1)", "def make_last_year_month_period(dt=None):\n if not dt:\n dt = datetime.utcnow()\n dt = dt.replace(year=dt.year - 1, month=dt.month, day=1)\n return int(dt.strftime('%Y%m'))", "def last_month_first_day():\r\n return (datetime.now().replace(day=1) + relativedelta(months=-1) + timedelta(days=-1)).strftime(\r\n '%d-%m-%Y')", "def floor_end_month(date):\n return datetime(date.year, date.month, 1) + timedelta(days=-1)", "def last_month(self):\r\n return RecordsLastMonth(self)", "def end_month(d):\n return date(d.year, d.month, monthrange(d.year, d.month)[1])", "def get_end_month(month):\n return datetime(2020, month, 28)", "def get_month_end(x: Optional[Date] = None) -> Date:\n return (x or get_today()).replace(day=1) + relativedelta(months=+1, days=-1)", "def _get_last_date_month(self, date_find):\n day = datetime.strptime(date_find, settings.TIME_FORMAT)\n last_day_of_month = calendar.monthrange(day.year, day.month)[1]\n date_day_of_month = '{}-{}-{} 00:00:00'.format(day.year, day.month, last_day_of_month)\n return date_day_of_month", "def last_day_of_month(date):\n last_day = calendar.monthrange(date.year, date.month)[1]\n return datetime.date(date.year, date.month, last_day)", "def last_day_of_month(date):\n last_day = calendar.monthrange(date.year, date.month)[1]\n return datetime.date(date.year, date.month, last_day)", "def to_end_of_month(self):\n days = _num_days_in_month(self._months, self._years)\n return from_year_month_day(self._years, self._months, days, validate=False)", "def get_months_to_date():\n month_sequence = [5, 4, 3, 2, 1, 12, 11, 10, 9, 8] # season is August to May\n try:\n current_month_index = month_sequence.index(dt.now().month)\n except ValueError:\n current_month_index = 0\n\n return month_sequence[current_month_index:]", "def get_current_month() -> int:\n return datetime.now().month", "def get_month(self, indate):\n return indate.strftime(\"%B\") + \"-\" + indate.strftime(\"%Y\")", "def get_default():\n today = datetime.date.today()\n if today.month == 1:\n return YearMonth(today.year - 1, 12)\n return YearMonth(today.year, today.month - 1)", "def get_last_day_of_month(today: Optional[datetime] = None) -> int:\n if today is None:\n today = datetime.utcnow()\n return monthrange(today.year, today.month)[1]", "def MONTH(date):\n return _make_datetime(date).month", "def getCurrentMonth(self):\n return math.ceil((self.wcount % 48) / 4)", "def decrement_month(self):\n month: int = int(self.month)\n month -= 1\n if month == 0:\n month == 12\n year: int = int(self.year)\n year -= 1\n self.year = str(year)\n self.month = str(month)\n if len(self.month) == 1:\n self.month = \"0\" + self.month", "def resolve_month(ym):\n if isinstance(ym, (tuple, list)):\n y, m = ym\n elif isinstance(ym, (datetime.datetime, datetime.date)):\n y, m = ym.year, ym.month-1\n elif isinstance(ym, int) or ym is None:\n today = timezone.now()\n y, m = today.year, today.month + (ym or 0)\n else:\n raise RuntimeError(\"Unsupported argument %r\" % ym)\n\n return y*12 + m", "def getMonth(self):\n return _libsbml.Date_getMonth(self)", "def mm(self):\n return '%02d' % self._month", "def month(self) -> int:\n if self.is_old_style:\n return 
int(self.split('/', 1)[1][2:4])\n return int(self[2:4])", "def pick_month():\n today = date.today()\n month = date(today.year, today.month, 1)\n if today.day < 14:\n # Use last month\n month -= timedelta(days=27)\n while month.day != 1:\n month -= timedelta(days=1)\n return month", "def month(self):\n return 0", "def month(self):\n return 0", "def last_month(today: Optional[datetime] = None, tz: Any = None) -> Tuple[datetime, datetime]:\n if today is None:\n today = datetime.utcnow()\n end = datetime(day=1, month=today.month, year=today.year)\n end_incl = end - timedelta(seconds=1)\n begin = datetime(day=1, month=end_incl.month, year=end_incl.year)\n return localize_time_range(begin, end, tz)" ]
[ "0.7890911", "0.77757496", "0.7772196", "0.74962604", "0.7192194", "0.6839517", "0.6807995", "0.6761683", "0.6746424", "0.6616606", "0.66151744", "0.6588002", "0.6588002", "0.6579385", "0.6565151", "0.65579987", "0.65383244", "0.64961576", "0.6473222", "0.64433897", "0.6393315", "0.6372145", "0.6341654", "0.63224846", "0.63065106", "0.6232501", "0.62307566", "0.61551315", "0.61551315", "0.6147469" ]
0.8435946
0
Retrieve the latest exchange rate from the given ECB data.
def _get_latest_ecb_rate(data: bytes) -> float: root = etree.fromstring(data) values = root.xpath('.//generic:ObsValue/@value', namespaces=root.nsmap) last_value = len(values) - 1 return float(values[last_value])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_and_store_latest_ecb_exrates():\n response = requests.get(DAILY_ECB_URL)\n # Raise exception if status_code != 200 or ConnectionError\n response.raise_for_status()\n info = ET.fromstring(response.content)[2][0]\n datestamp = datetime.strptime(info.attrib['time'], \"%Y-%m-%d\").date()\n rates = [x.attrib for x in info]\n\n exrates = []\n for item in rates:\n if item['currency'] in SUPPORTED_CURRENCIES:\n exrate, created = ExchangeRate.objects.update_or_create(\n datestamp=datestamp,\n currency=item['currency'],\n defaults={'rate': Decimal(item['rate'])}\n )\n exrates.append(exrate)\n print(exrate, \"NEW EXRATE!\" if created else \"<noupdate>\")\n\n return exrates", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def get_realtime_exchange_rate(from_currency, to_currency) :\n\turl = f\"{BASE_URL}function={settings.CURRENCY_EXCHANGE_RATE}&from_currency={from_currency}&to_currency={to_currency}&apikey={API_KEY}\" \n\trequest = requests.get(url)\n\tresult = request.json()\n\treturn result[PREFIX][EXCHANGE_RATE], result[PREFIX][DATE]", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def get_currency_exchange_rate(self, from_currency, to_currency):\n _FUNCTION_KEY = 'CURRENCY_EXCHANGE_RATE'\n return _FUNCTION_KEY, 'Realtime Currency Exchange Rate', None", "def get_rate(currency, date):\n status = 400\n while status != 200:\n url = (\"http://api.nbp.pl/api/exchangerates/rates/A/%s/%d-%02d-%02d?format=json\" %\n (currency, date.year, date.month, date.day))\n\n response = requests.get(url)\n status = response.status_code\n if status != 200:\n date = date - datetime.timedelta(1)\n\n tree = json.loads(response.content)\n assert len(tree['rates']) == 1\n print_rate_info(tree['rates'])\n return (tree['rates'][0]['mid'], date)", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n 
params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date):\n raise NotImplementedError", "def get_euro_exchange_rates(currency, frequency=\"D\"):\n ISO_4217_RE = re.compile(r\"[A-Z]{3}\")\n FREQUENCIES = [\"D\", \"M\", \"A\"]\n \n URL_TEMPLATE = \"http://sdw-wsrest.ecb.europa.eu/service/data/EXR/{}.{}.EUR.SP00.A?format=csvdata\"\n \n if not ISO_4217_RE.match(currency):\n raise ValueError('\"' + currency + '\" is no valid currency code!')\n if frequency not in FREQUENCIES:\n raise ValueError(\"Frequency must be one of \" + \", \".join(FREQUENCIES))\n \n url = URL_TEMPLATE.format(frequency, currency)\n req = Request(url)\n response = urlopen(req)\n lines = []\n for line in response:\n lines.append(line.decode(\"utf-8\"))\n reader = csv.DictReader(lines)\n result = {}\n for line in reader:\n date = line[\"TIME_PERIOD\"]\n value = line[\"OBS_VALUE\"]\n result[date] = value\n return result", "def parse_rate():\n try:\n response = requests.get(ecb_url)\n except Exception as e:\n return {\"error\": \"error occurred while accessing www.ecb.europa.eu: {}\".format(e)}, True\n else:\n currency_xml = response.content.decode()\n root = ET.fromstring(currency_xml)\n currencies_list = [currency.attrib.get('currency') for currency in root.iter(cube) if currency.attrib.get('currency')]\n rates_list = [float(currency.attrib.get('rate')) for currency in root.iter(cube) if currency.attrib.get('rate')]\n result = dict(zip(currencies_list, rates_list))\n result['EUR'] = float(1)\n return result, False", "def update(self):\n self.rate = self.exchange.latest()", "def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date, provider=None, *args, **kwargs):\n raise NotImplementedError", "def lookup(self, invoice_code):\n return self.exchange_rate_btc_today[0]", "def get_ecb_rates_for_currency(currency):\n # UPDATE 2018-06-05 -- read directly from the database, and skip caching\n if currency not in SUPPORTED_CURRENCIES:\n raise CurrencyNotSupported(\"Currently we don't support %s\" % currency)\n exrates = get_latest_ecb_rates_from_db(currency)\n return (exrates['datestamp'], exrates[currency])", "def get_rate(self, t):\n return self.rates[bisect.bisect(self.change_times, t) - 1]", "def _get_currency_rate(currency):\n response = requests.get(f'{config(\"OPENEXCHANGERATES_URL\")}')\n if not response.ok:\n # log\n # can handle exception in better way later\n raise Exception(\n f'currency conversion api not working {response.text}')\n rates = response.json().get('rates')\n currency_rate = rates.get(currency.upper(), None)\n if not currency_rate:\n raise ValueError(f'Given currency conversion rate not found')\n return currency_rate", "def latest(self, base='USD'):\n try:\n resp = self.client.get(self.ENDPOINT_LATEST, params={'base': base})\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise OpenExchangeRatesClientException(e)\n return resp.json(parse_int=decimal.Decimal,\n parse_float=decimal.Decimal)", "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, 
_ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def base_exchange_rate(self):\n return self._base_exchange_rate", "def api_call(cls, currency):\n headers = {\"x-accept-version\": \"2.0.0\", \"Accept\": \"application/json\"}\n r = requests.get(cls.API_URL + currency, headers=headers)\n r.raise_for_status()\n return r.json()[\"data\"][\"rate\"]", "def fetch_currency_rates(url=\"http://www.nbrb.by/API/ExRates/Rates?Periodicity=0\") -> dict:\n data = {}\n response = requests.get(url)\n if response.status_code == 200:\n data = get_json(response)\n return data", "def downloadExchangeRates(_source_currency, _track_reconnections):\n try:\n logger.info('downloadExchangeRates: Retrieving exchange rates.')\n logger.debug('downloadExchangeRates: Retrieving exchange rates for: %s' % _source_currency)\n\n exchange_rate_ = 0\n\n #download exchange rate\n got_html = getHtml(URL_CALCULATOR + '1' + _source_currency + '=?' + BASE_CURRENCY)\n\n #parse\n if got_html:\n if 'error: \"\"' in got_html:\n #parse data\n re_object = re.search(\".*rhs: \\\"(\\d\\.\\d*)\", got_html)\n\n #using float since we're not interested in high precision\n exchange_rate_ = float(re_object.group(1))\n logger.debug('downloadExchangeRates: Parsed exchange rate: %s' % exchange_rate_)\n\n else:\n #reconnect if error field not empty\n if _track_reconnections['times_reconnected'] <= MAXIMUM_RECONNECTIONS:\n logger.debug('downloadExchangeRates: Times reconnected: %s' %\n _track_reconnections['times_reconnected'])\n logger.warning('downloadExchangeRates: Server signalizes an error, repeating request.')\n\n _track_reconnections['times_reconnected'] += 1\n\n #wait for the server to allow another inquiry\n time.sleep(PAUSE_BETWEEN_RECONNECTIONS)\n\n #repeat request\n downloadExchangeRates(_source_currency, _track_reconnections)\n\n else:\n logger.error('downloadExchangeRates: Could not obtain exchange rate for: %s, returning '\n 'default value.' % _source_currency)\n\n return exchange_rate_\n\n except:\n raise", "def comprxbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._comprxbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def get_rates_for(currency: str, date: str):\n baseurl = f\"https://openexchangerates.org/api/historical/{date}.json\"\n params = {\"app_id\": OEG_APP_ID, \"symbols\": currency, \"base\": \"USD\"}\n return make_request(baseurl=baseurl, params=params)", "def latest_L2_order_book_entry(symbol: str,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True):\n try:\n check_exchange_existence(exchange=exchange)\n response = asyncio.get_event_loop().run_until_complete(\n getOrderBookL2(symbol=symbol,\n number_of_data_points=1,\n exchange=exchange,\n rate_limit=rate_limit))\n latest_orderbook_entry_dict = {}\n latest_orderbook_entry_dict['symbol'] = symbol\n latest_orderbook_entry_dict['ask'] = response['asks'][0][0] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['asksize'] = response['asks'][0][1] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['bid'] = response['bids'][0][0] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['bidsize'] = response['bids'][0][1] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['datetime'] = response['datetime']\n latest_orderbook_entry_dict['nonce'] = response['nonce']\n return latest_orderbook_entry_dict\n except Exception as exception:\n logger.error('Oops! 
An error Occurred ⚠️')\n raise exception", "def get_updated_currency(self, currency_array, main_currency,\n max_delta_days):\n url = 'http://rate.bot.com.tw/xrt/flcsv/0/day'\n\n # We do not want to update the main currency\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n _logger.debug(\"BOT currency rate service : connecting...\")\n try:\n url_open = urllib.request.urlopen(url)\n csvfile = csv.reader(io.StringIO(url_open.read().decode('utf-8-sig')), delimiter=',')\n url_open.close()\n except IOError:\n raise UserError(\n _('Web Service does not exist (%s)!') % url)\n\n next(csvfile)\n exchange = {}\n for row in csvfile:\n bid = float(row[3])\n ask = float(row[13])\n\n exchange[row[0]] = {\n 'bid': bid,\n 'ask': ask\n }\n\n self.check_rate_date(datetime.today(), max_delta_days)\n self.supported_currency_array = list(exchange.keys())\n\n self.supported_currency_array.append('TWD')\n _logger.debug(\"Supported currencies = %s \" %\n self.supported_currency_array)\n self.validate_cur(main_currency)\n if main_currency != 'TWD':\n main_rate = float(exchange[main_currency]['ask'])\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n for curr in currency_array:\n self.validate_cur(curr)\n if curr == 'TWD':\n rate = main_rate\n else:\n if main_currency == 'TWD':\n rate = 1 / float(exchange[curr]['ask'])\n else:\n rate = main_rate / float(exchange[curr]['ask'])\n self.updated_currency[curr] = rate\n _logger.debug(\n \"Rate retrieved : 1 %s = %s %s\" % (main_currency, rate, curr)\n )\n return self.updated_currency, self.log_info", "def get_rate(parent=None):\n dialog = RateDialog(parent)\n dialog.exec_()\n rate = dialog.rate\n return rate", "def getDataRate(self):\n \n return self.DataRate", "def latest_order_book_entry(symbol: str,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True):\n try:\n check_exchange_existence(exchange=exchange)\n response = asyncio.get_event_loop().run_until_complete(\n getOrderBook(symbol=symbol,\n number_of_data_points=1,\n exchange=exchange,\n rate_limit=rate_limit))\n latest_orderbook_entry_dict = {}\n latest_orderbook_entry_dict['symbol'] = symbol\n latest_orderbook_entry_dict['ask'] = response['asks'][0][0] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['asksize'] = response['asks'][0][1] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['bid'] = response['bids'][0][0] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['bidsize'] = response['bids'][0][1] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['datetime'] = response['datetime']\n latest_orderbook_entry_dict['nonce'] = response['nonce']\n return latest_orderbook_entry_dict\n except Exception as exception:\n logger.error('Oops! An error Occurred ⚠️')\n raise exception", "def currency_rate(self, init):\r\n\r\n curr = CurrencyRates()\r\n curr_rate = curr.get_rates(init)\r\n return curr_rate" ]
[ "0.7017314", "0.6967795", "0.6684886", "0.6548335", "0.65449774", "0.65055674", "0.6461167", "0.6431432", "0.64266616", "0.6394351", "0.62385744", "0.62312907", "0.62306887", "0.62064004", "0.61375725", "0.6130839", "0.6130635", "0.6115328", "0.60870564", "0.6060977", "0.59144956", "0.58851856", "0.58674395", "0.583746", "0.58213174", "0.5775519", "0.5773551", "0.5763365", "0.57522315", "0.5652959" ]
0.759938
0
Retrieve and store the 15min delayed BTC market price in EUR.
def _get_btc_eur_15min(self) -> None: with requests.get(BITCOIN_TICKER) as response: response.raise_for_status() json_data = response.json() self.btc_eur_15min = json_data["EUR"]["15m"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def do_work(self) -> None:\n self._get_btc_eur_15min()\n print(\n f\"1 BTC = {self.btc_eur_15min} EUR\"\n f\"\\t\\t(15min delayed market price)\"\n )\n\n self._get_eur_gbp_last_month()\n print(\n f\"1 EUR = {self.eur_gbp_last_month} GBP\"\n f\"\\t(last month average rate)\"\n )\n\n self._get_btc_gbp_15min()\n print(\n f\"1 BTC = {self.btc_gbp_15min:.6f} GBP\"\n f\"\\t(BTC 15min delayed market price; GBP latest daily average rate)\"\n )", "def BuyingPrice(self):\n return self.buying_rice", "def _get_btc_gbp_15min(self) -> None:\n self._get_eur_gbp_last_daily()\n\n self.btc_gbp_15min = self.btc_eur_15min * self.eur_gbp_last_day", "def track_price():\n r = requests.get('https://finance.yahoo.com/quote/EURPLN=X?p=EURPLN%3DX&.tsrc=fin-srch&guce_referrer'\n '=aHR0cHM6Ly9maW5hbmNlLnlhaG9vLmNvbS8_Z3VjZV9yZWZlcnJlcj1hSFIwY0hNNkx5OTNkM2N1WjI5d'\n 'loyeGxMbU52YlM4Jmd1Y2VfcmVmZXJyZXJfc2lnPUFRQUFBRG1vS3ROMkF5bzFpTDRpd29Td0Z4Z0NDTVN'\n 'XU3M0UkNoa2pBcGl2NmxobmxJcWRab0JIWUF6NVJuNHlZdkN1WTRBNEdwVTRfWjBZQ3JNM1RwX2ZMd05rej'\n 'g0TkVWdksyUzA3LVNmNXdndUJCUjhieG5sZEN4dGRCRmV6eEZfMnNQdEpQeXJ6UzREeV9WRUF4ZXNUMXNLYz'\n 'lnTm1pSlFCV3R6LVpLX0hvc2p5Jl9ndWNfY29uc2Vud'\n 'F9za2lwPTE1OTcwODc3MTg&guce_referrer_sig=AQAAAKzjjM2--Diw1M3gykrGHjIn9NdqSch_odxmo6xqtgD4pNo'\n 'anrEQBgPoZ9xkh8HPYFN1_9mpio4Fg2tEGa4GrsK69bHe4yN9LactTwdKEuBxazZPO751TNSeFH_lltkNoN1k7D6I978v'\n '1eXB9WaCp0NUgbRZRmbYEdoZmkmQvUq7&_guc_consent_skip=1597087949')\n if r.status_code != 200:\n raise ConnectionError\n else:\n soup = BeautifulSoup(r.text, 'html.parser')\n price_elem = soup.find('span', {\"class\": \"Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)\"})\n return float(price_elem.text)", "def price(temp):\n now = datetime.datetime.now()\n r = requests.get(\"https://bitcoin.co.th/\")\n soup = BeautifulSoup(r.content, \"html.parser\")\n data = soup.find_all(\"div\", {\"class\": \"price\"})\n print(\"[%02i:%02i:%02i] Now BTC Price : \" % (now.hour, now.minute, now.second), end=\"\")\n for i in range(len(data)):\n price = (data[i].text)\n print(price)\n if price != temp: # Price Change\n line_sent(price)\n temp = price\n time.sleep(30) # Delay 30 second\n main(temp) # call function main for loop", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n currentPrice = jsonResponse[pair][\"last\"]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def get_current_price(self):\n URL = config.coin['price_hist_url'] + self.ticker.lower()\n try:\n r = requests.get(URL)\n data = json.loads(r.text)\n value = data['last']\n timestamp = data['timestamp']\n 
self.current_price = value\n self.current_datetime = timestamp\n except Exception as err:\n logger.error(err)", "def updateLastPrice(self):\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(\n pytz.timezone('US/Central')).strftime(\"%H:%M\")\n\n # UPDATE POSITION LAST PRICE AND UPDATE HIGH PRICE\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n open_positions_list = []\n\n for position in open_positions:\n\n symbol = position[\"Symbol\"]\n\n if symbol not in open_positions_list:\n\n open_positions_list.append(symbol)\n\n if len(open_positions_list) > 0:\n\n resp = self.tdameritrade.getQuotes(open_positions_list)\n\n if resp:\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n if dt_central == \"15:00\":\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Opening_Price\": last_price}})\n\n # UPDATE QUEUE LAST PRICE\n queues = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type})\n\n queues_list = []\n\n for queue in queues:\n\n if self.asset_type == \"EQUITY\":\n\n symbol = queue[\"Symbol\"]\n\n elif self.asset_type == \"OPTION\":\n\n symbol = queue[\"Pre_Symbol\"]\n\n if symbol not in queues_list:\n\n queues_list.append(symbol)\n\n if len(queues_list) > 0:\n\n resp = self.tdameritrade.getQuotes(queues_list)\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n if self.asset_type == \"EQUITY\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n elif self.asset_type == \"OPTION\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Pre_Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})", "def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value", "def price(self) -> float:\n return self.close", "def get_price():\n return uniform(1.0, 350.0)", "def buy_and_pay(self):\n return self.price", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def purchase_price(self):\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return 10000 - (.10 * self.miles)", "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = 
supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n if supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "def poll_price_data():\n resp = requests.get(COINDESK_ENDPOINT) # Powered by CoinDesk\n if resp.status_code == 200:\n logging.info(\"GET request succeeded\")\n data = resp.json()\n data_dict = {\n \"id\": str(uuid.uuid1()),\n \"time\": data['time']['updated'],\n \"currency\": data['bpi']['USD']['code'],\n \"price\": data['bpi']['USD']['rate']\n }\n return data_dict\n else:\n logging.error(\"GET request failed\")", "def target_buy_price(self):\n if self.period_tick == 0:\n return random.randint(1, 10)\n elif self.period_tick % self.perseverance == 0:\n # Player runs out of patience and decides to change target price.\n (avg_price,\n max_price,\n min_price) = self.market.get_stock_price_last_period()\n\n power = self.period_tick / self.perseverance\n target_price = min(min_price + power, self.money_balance * 0.5)\n return target_price\n else:\n return None", "def get_product_price(self, url):\n self.driver.get(url)\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_ourprice\").text\n except:\n pass\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_dealprice\").text\n except:\n pass\n\n if price is None:\n price = \"Not available\"\n\n else:\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return price", "def fetch_price():\n\n url = 
\"https://www.bitstamp.net/api/ticker/\"\n\n response = json.load(urllib2.urlopen(url))\n\n return {\"buy\": response['ask'], \"sell\": response['bid']}", "def get_market_price(self, exchange, pair, type):\n return self.ccxt.get_market_price(exchange, pair, type)", "def get_price(data):\n return data[\"summaryDetail\"][\"regularMarketPreviousClose\"][\"raw\"]", "def block5_price(self):\n return self._safe_value(VAR_BLOCK5PRICE, float)", "def get_price(self):\r\n return self.price", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def field_buy(self, symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)" ]
[ "0.65415394", "0.6299414", "0.62688947", "0.6254417", "0.62187046", "0.6205762", "0.620122", "0.61773175", "0.61718124", "0.6161998", "0.6105141", "0.60968745", "0.6074585", "0.6072953", "0.601132", "0.5992859", "0.5969653", "0.5964009", "0.5900362", "0.5885169", "0.586665", "0.5839722", "0.58291465", "0.5826046", "0.576981", "0.57643616", "0.57634306", "0.5751886", "0.5744245", "0.5734768" ]
0.70017016
0
Retrieve and store last month's EUR to GBP average rate.
def _get_eur_gbp_last_month(self) -> None: last_month = _last_month() data = _get_ecb_data(FREQUENCY_MONTHLY, last_month, last_month) self.eur_gbp_last_month = _get_latest_ecb_rate(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def get_avg(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n mean = df.mean(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n mean = round(float(mean), 4)\r\n return mean", "def bitcoinaverage(site):\n url = \"https://apiv2.bitcoinaverage.com/frontend/constants/exchangerates/local\"\n try:\n session = requests.Session()\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()[\"rates\"]\n data = {\"USD:\" + key: float(val[\"rate\"]) for key, val in ret.items()}\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def test_get_historical_gold_rate(self):\n rates = [153.50, 162.49, 123.86, 155.10]\n helper.gold_loop_helper(get_historical_gold_rate, TestHistoricalRates.dates_rate, rates)", "def get_average_for_month(self, month, weekend):\n\t\tif weekend:\n\t\t\treturn self.averages_weekend[month]\n\t\telse:\n\t\t\treturn self.averages_weekday[month]", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def rate(self):\n return self.brate / FAC", "def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)", "def calculate_revenue(self, period: int, currency_rate: float) -> float:\n revenue_in_currency = self.capital * ((1.0 + self.interest) ** period - 1.0)\n revenue = revenue_in_currency * currency_rate\n return revenue", "def get_current_rate(self):\n pass", "def market_avg_price(**params):\n endpoint = 'calc/trade/avg'\n return request(authenticate=False, version=2, endpoint=endpoint, method='POST', query_params=params)", "def test_forecast_precision_mase1y_avg():\n joined_data = pd.DataFrame({'temp': [1], 'dt': [1575082800], 'today': ['2019-11-30'],\n 't5': [4.0],\n 't4': [3],\n 't3': [2.0],\n 't2': [1],\n 't1': [1.0]})\n historical_data = rutil.read_historical_data(\"tests/csv_files/historical_data_year_avg.csv\")\n years_back = 2\n\n result = rmet.mean_absolute_scaled_error_year_avg(joined_data, historical_data, 'temp', years_back)\n assert result == [1, 2 / 3, 1 / 3, 0, 0]", "def get_meanrates(self):\n return np.asarray([ n.meanrate for n in self.alln.values() ])", "def growthrate(cur, pre, y):\n return (cur-pre)/y", "def calculate_today_last(region, db, local_forecast):\n\t\n\t# get all temps from today table\n\ttoday_historical_params = {'table': 'today',\n\t\t\t\t\t\t 'conditions': ['region_id'],\n\t\t\t\t\t\t 'condition_data': (region.id,),\n\t\t\t\t\t\t 'filters': ['hour_{}'.format(x) for x in range(0, 24, 3)]}\n\n\t# returns a list of tuples\n\ttoday_historical = db.select(**today_historical_params)\n\n\tif None in today_historical[0]:\n\t\treturn 0.0\n\n\t# calculate mean\n\ttoday_avg = round(statistics.mean(today_historical[0]), 2)\n\n\t# save to history\n\tcurrent_date = dt_to_string(list(local_forecast)[0].date(), time=False)\n\tsave_to_history_params = {'table': 'history',\n\t\t\t\t\t\t\t 'destinations': ['region_id', 'temp', 'date', 'datetime'],\n\t\t\t\t\t\t\t 'data': (region.id, today_avg, current_date,\n\t\t\t\t\t\t\t\t\t dt_to_string(datetime.datetime.now()))}\n\tdb.insert(**save_to_history_params)\n\n\t# set today table to 
null\n\tset_today_to_null_params = {'table': 'today',\n\t\t\t\t\t\t\t\t'destinations': ['hour_{}'.format(x) for x in range(0, 24, 3)] + ['datetime'],\n\t\t\t\t\t\t\t\t'data': tuple([None] * 9),\n\t\t\t\t\t\t\t\t'conditions': ['region_id'],\n\t\t\t\t\t\t\t\t'condition_data': (region.id,)}\n\tdb.update(**set_today_to_null_params)\n\n\treturn today_avg", "def get_value(\n self\n ) -> float:\n\n return self.average", "def value_ret_calendar_period(self, year: int, month: int = None) -> float:\n if month is None:\n period = str(year)\n else:\n period = '-'.join([str(year), str(month).zfill(2)])\n rtn = self.tsdf.copy().pct_change()\n rtn = rtn.loc[period] + 1\n return float(rtn.apply(np.cumprod, axis='index').iloc[-1] - 1)", "def get_latest_average(fsym, tsym, markets='all', try_conversion=True, \n format='raw'):\n\t\n\t# build url \n\turl = build_url('generateAvg', fsym=fsym, tsym=tsym, markets=markets,\n\t try_conversion=try_conversion)\n\n\t# http request\n\tr = requests.get(url)\n\n\t# decode to json\n\tdata = r.json()\n\n\tif format == 'raw':\n\t\tdata = data['RAW']\n\telif format == 'display':\n\t\tdata = data['DISPLAY']\n\n\treturn data", "def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0", "def api_asset_calculate_revenue():\n periods = request.args.getlist(\"period\")\n\n daily_response = requests.get(CBR_DAILY_URL)\n key_indicators_response = requests.get(CBR_INDICATORS_URL)\n currency_rates = parse_cbr_currency_base_daily(daily_response.text)\n currency_rates.update(parse_cbr_key_indicators(key_indicators_response.text))\n\n result = {}\n for period in periods:\n result[period] = app.bank.calculate_revenue(int(period), currency_rates)\n return result, 200", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def GrowthAPR(self, years=10):\n pastPrice = self.history[-1].price\n import datetime\n today = datetime.datetime.now()\n pastDate = self.history[-1].date\n # TODO: This is an inefficient way to look up a specific date\n for index in range(0, len(self.history) - 1):\n if today - self.history[index].date < datetime.timedelta(days=365.25*years):\n # Assuming the stock data is in chronological order, the first result more recent than X years\n # is a good enough approximation\n pastPrice = self.history[index].price\n pastDate = self.history[index].date\n break\n if pastPrice == 0.:\n return 0.\n n_years = (self.history[-1].date - pastDate).days / 365.25\n if n_years == 0.:\n return 0.\n return 100. * (self.history[-1].price / pastPrice) ** (1. 
/ n_years) - 100.", "def calc_av_daily_return(self):\n av_return = 0.0\n total_ret = sum(self._returns)\n num_averages = len(self._returns)\n \n if num_averages > 0:\n av_return = total_ret/float(num_averages)\n \n self._av_daily_return = av_return\n return av_return", "def genMarketStat(self):\n myMarketStat = marketstat.MarketStat({'id':str(self.currentRound)})\n self.marketStats[str(self.currentRound)] = myMarketStat\n # set avg price to last rounds market avg price\n if self.currentRound > 1:\n lastMarketStat = self.marketStats[str(self.currentRound-1)]\n myMarketStat.avgSoldAL = lastMarketStat.avgSoldAL\n myMarketStat.avgSoldEC = lastMarketStat.avgSoldEC\n myMarketStat.avgSoldIA = lastMarketStat.avgSoldIA", "def stock_average(stock):\n closing_price=stock['Close']\n average=stats.mean(closing_price)\n return average", "def rate_last(self):\n diff = (self.time - self.lasts[0][0]).total_seconds()\n try:\n return (self.pos - self.lasts[0][1]) / FAC / diff\n except ZeroDivisionError:\n return 0.0", "def compute_rate(self):\n bg_rate = self.counts.data / self.livetime.data\n\n bg_rate /= self.counts.bin_volume\n\n bg_rate = bg_rate.to('MeV-1 sr-1 s-1')\n\n self.bg_rate.data = bg_rate\n self.bg_rate.data_err = (np.sqrt(self.counts.data) / (self.counts.bin_volume * self.livetime.data)).to(\n 'MeV-1 sr-1 s-1')", "def arithmetic_ret(self) -> float:\n return float(np.log(self.tsdf).diff().mean() * self.periods_in_a_year)", "def get_monthly_avg(all_stock_data):\n try:\n monthly_data = {}\n for data in all_stock_data:\n month = data[0][0:7]\n if month not in monthly_data:\n monthly_data[month] = []\n monthly_data[month].append(data)\n monthly_avg_list = []\n for month, stock_data in monthly_data.items():\n monthly_avg_list.append((month, get_avg(stock_data)))\n return monthly_avg_list\n\n except Exception as e:\n print(e)\n exit()", "def em_mean(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical mean.')\n return self.__total_rewards / self.__total_pulls" ]
[ "0.65970474", "0.6344092", "0.5897189", "0.5880332", "0.58152103", "0.5769175", "0.57214457", "0.5711045", "0.55768675", "0.5523535", "0.5501505", "0.5479372", "0.5457628", "0.54402447", "0.542044", "0.5388946", "0.5384516", "0.5383942", "0.53801686", "0.53687716", "0.53634995", "0.5359037", "0.5356137", "0.535052", "0.5333389", "0.53300786", "0.53068876", "0.5300441", "0.5293764", "0.52898085" ]
0.7096632
0
Retrieve and store the latest daily EUR to GBP average rate.
def _get_eur_gbp_last_daily(self) -> None: data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today()) self.eur_gbp_last_day = _get_latest_ecb_rate(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bitcoinaverage(site):\n url = \"https://apiv2.bitcoinaverage.com/frontend/constants/exchangerates/local\"\n try:\n session = requests.Session()\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()[\"rates\"]\n data = {\"USD:\" + key: float(val[\"rate\"]) for key, val in ret.items()}\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def fetch_and_store_latest_ecb_exrates():\n response = requests.get(DAILY_ECB_URL)\n # Raise exception if status_code != 200 or ConnectionError\n response.raise_for_status()\n info = ET.fromstring(response.content)[2][0]\n datestamp = datetime.strptime(info.attrib['time'], \"%Y-%m-%d\").date()\n rates = [x.attrib for x in info]\n\n exrates = []\n for item in rates:\n if item['currency'] in SUPPORTED_CURRENCIES:\n exrate, created = ExchangeRate.objects.update_or_create(\n datestamp=datestamp,\n currency=item['currency'],\n defaults={'rate': Decimal(item['rate'])}\n )\n exrates.append(exrate)\n print(exrate, \"NEW EXRATE!\" if created else \"<noupdate>\")\n\n return exrates", "def get_avg(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n mean = df.mean(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n mean = round(float(mean), 4)\r\n return mean", "def test_get_historical_gold_rate(self):\n rates = [153.50, 162.49, 123.86, 155.10]\n helper.gold_loop_helper(get_historical_gold_rate, TestHistoricalRates.dates_rate, rates)", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def _get_eur_gbp_last_month(self) -> None:\n last_month = _last_month()\n data = _get_ecb_data(FREQUENCY_MONTHLY, last_month, last_month)\n\n self.eur_gbp_last_month = _get_latest_ecb_rate(data)", "def market_avg_price(**params):\n endpoint = 'calc/trade/avg'\n return request(authenticate=False, version=2, endpoint=endpoint, method='POST', query_params=params)", "def get_current_rate(self):\n pass", "def interval_average():\r\n import statistics as st\r\n from tach_detect import tach_detect\r\n r = request.get_json()\r\n try:\r\n email = r[\"user_email\"]\r\n except KeyError:\r\n return jsonify(\"no email input\"), 400\r\n raise LookupError(\"no email input\")\r\n check_email = Check_For_User(email)\r\n if check_email.user_exists is False:\r\n return jsonify(str(email) + \" was not found. Please re-enter\"), 400\r\n raise LookupError(str(user_email) + \" was not found. Please re-enter\")\r\n try:\r\n input_date_time = r[\"date_time\"]\r\n except KeyError:\r\n return jsonify(\"no date entered\"), 400\r\n raise LookupError(\"no date entered\")\r\n try:\r\n validate_date_time(input_date_time)\r\n except (ValueError, TypeError) as error:\r\n return jsonify(\"date entered is invalid. 
Please re-type.\"), 400\r\n date_time = datetime.datetime(input_date_time[0], input_date_time[1],\r\n input_date_time[2], input_date_time[3],\r\n input_date_time[4], input_date_time[5],\r\n input_date_time[6])\r\n time_list = get_all_times(email)\r\n heart_rate_list = get_all_rates(email)\r\n interval_list = find_first_date(date_time, time_list, heart_rate_list)\r\n try:\r\n interval_average_post = st.mean(interval_list)\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n except st.StatisticsError:\r\n interval_average_post = heart_rate_list[len(heart_rate_list)-1]\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n return jsonify(return_dict), 200", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def rate(self):\n return self.brate / FAC", "def get_updated_currency(self, currency_array, main_currency,\n max_delta_days):\n url = 'http://rate.bot.com.tw/xrt/flcsv/0/day'\n\n # We do not want to update the main currency\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n _logger.debug(\"BOT currency rate service : connecting...\")\n try:\n url_open = urllib.request.urlopen(url)\n csvfile = csv.reader(io.StringIO(url_open.read().decode('utf-8-sig')), delimiter=',')\n url_open.close()\n except IOError:\n raise UserError(\n _('Web Service does not exist (%s)!') % url)\n\n next(csvfile)\n exchange = {}\n for row in csvfile:\n bid = float(row[3])\n ask = float(row[13])\n\n exchange[row[0]] = {\n 'bid': bid,\n 'ask': ask\n }\n\n self.check_rate_date(datetime.today(), max_delta_days)\n self.supported_currency_array = list(exchange.keys())\n\n self.supported_currency_array.append('TWD')\n _logger.debug(\"Supported currencies = %s \" %\n self.supported_currency_array)\n self.validate_cur(main_currency)\n if main_currency != 'TWD':\n main_rate = float(exchange[main_currency]['ask'])\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n for curr in currency_array:\n self.validate_cur(curr)\n if curr == 'TWD':\n rate = main_rate\n else:\n if main_currency == 'TWD':\n rate = 1 / float(exchange[curr]['ask'])\n else:\n rate = main_rate / float(exchange[curr]['ask'])\n self.updated_currency[curr] = rate\n _logger.debug(\n \"Rate retrieved : 1 %s = %s %s\" % (main_currency, rate, curr)\n )\n return self.updated_currency, self.log_info", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def get_avg(all_stock_data):\n try:\n sum_close_vol = 0.0\n sum_vol = 0.0\n for item 
in all_stock_data:\n adj_close = item[1]\n volume = item[2]\n sum_close_vol += adj_close * volume\n sum_vol += item[2]\n return sum_close_vol / sum_vol\n\n except Exception as e:\n print(e)\n exit()", "def averagePrice(self, onlyUnconsumed):\n\n\t\tif onlyUnconsumed:\n\t\t\treturn self.unconsumedValue / (len(self.bottles) - self.numberConsumed)\n\n\t\treturn self.totalValue / len(self.bottles)", "def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()", "def get_latest_average(fsym, tsym, markets='all', try_conversion=True, \n format='raw'):\n\t\n\t# build url \n\turl = build_url('generateAvg', fsym=fsym, tsym=tsym, markets=markets,\n\t try_conversion=try_conversion)\n\n\t# http request\n\tr = requests.get(url)\n\n\t# decode to json\n\tdata = r.json()\n\n\tif format == 'raw':\n\t\tdata = data['RAW']\n\telif format == 'display':\n\t\tdata = data['DISPLAY']\n\n\treturn data", "def ADK_Rate_Avg(Uion,Z,E):\n\treturn Cycle_Averaging_Factor(Uion,E)*ADK_Rate(Uion,Z,E)", "def get_days_rate():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_days_rate = \"\"\"\n SELECT * FROM (SELECT TO_CHAR(time::date,'Mon DD, YYYY') AS date,\n ROUND((COUNT(status) FILTER (\n WHERE status='404 NOT FOUND'))*100/COUNT(status)::decimal, 2)::text\n ||'% errors' AS rate\n FROM log\n GROUP BY time::date) AS error_rate\n WHERE rate::text > 1::text;\"\"\"\n c.execute(query_days_rate)\n rates = from_db_cursor(c)\n db.close()\n return rates", "def update(self):\n self.rate = self.exchange.latest()", "def get_patient_average():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/average/2\")\n print(r.text)", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def save(self, *args, **kwargs):\n self.item.rates_total += 1\n 
self.item.average_rate += (self.item.average_rate + self.rate) / self.item.rates_total\n self.item.save()\n super(Rate, self).save(*args, **kwargs)", "def _get_latest_ecb_rate(data: bytes) -> float:\n root = etree.fromstring(data)\n values = root.xpath('.//generic:ObsValue/@value', namespaces=root.nsmap)\n last_value = len(values) - 1\n\n return float(values[last_value])", "def calculateDataRate(self):\n pass", "def compute_rate(self):\n bg_rate = self.counts.data / self.livetime.data\n\n bg_rate /= self.counts.bin_volume\n\n bg_rate = bg_rate.to('MeV-1 sr-1 s-1')\n\n self.bg_rate.data = bg_rate\n self.bg_rate.data_err = (np.sqrt(self.counts.data) / (self.counts.bin_volume * self.livetime.data)).to(\n 'MeV-1 sr-1 s-1')", "def data_rate(self):\n return self._data_rate", "def rate(self):\n return self._rate", "def get_average_survival(self):\n return np.mean(self.survival_rates)", "def rate(self):\n return self.__rate" ]
[ "0.65365905", "0.62230974", "0.61657596", "0.6133238", "0.60252744", "0.5859477", "0.58187735", "0.58073163", "0.5803112", "0.5690306", "0.56884164", "0.56819123", "0.56722516", "0.5655974", "0.56529254", "0.56445503", "0.5622465", "0.5596036", "0.559005", "0.5584927", "0.55840766", "0.55819106", "0.5573295", "0.5554427", "0.5548469", "0.5541163", "0.55384475", "0.5531847", "0.5527902", "0.5507146" ]
0.714428
0
Calculate the 15min delayed BTC market price in GBP.
def _get_btc_gbp_15min(self) -> None: self._get_eur_gbp_last_daily() self.btc_gbp_15min = self.btc_eur_15min * self.eur_gbp_last_day
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_work(self) -> None:\n self._get_btc_eur_15min()\n print(\n f\"1 BTC = {self.btc_eur_15min} EUR\"\n f\"\\t\\t(15min delayed market price)\"\n )\n\n self._get_eur_gbp_last_month()\n print(\n f\"1 EUR = {self.eur_gbp_last_month} GBP\"\n f\"\\t(last month average rate)\"\n )\n\n self._get_btc_gbp_15min()\n print(\n f\"1 BTC = {self.btc_gbp_15min:.6f} GBP\"\n f\"\\t(BTC 15min delayed market price; GBP latest daily average rate)\"\n )", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def _get_btc_eur_15min(self) -> None:\n with requests.get(BITCOIN_TICKER) as response:\n response.raise_for_status()\n json_data = response.json()\n\n self.btc_eur_15min = json_data[\"EUR\"][\"15m\"]", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def BuyingPrice(self):\n return self.buying_rice", "def target_buy_price(self):\n if self.period_tick == 0:\n return random.randint(1, 10)\n elif self.period_tick % self.perseverance == 0:\n # Player runs out of patience and decides to change target price.\n (avg_price,\n max_price,\n min_price) = self.market.get_stock_price_last_period()\n\n power = self.period_tick / self.perseverance\n target_price = min(min_price + power, self.money_balance * 0.5)\n return target_price\n else:\n return None", "def get_price():\n return uniform(1.0, 350.0)", "def CalculateTimeFrameGasEneregyCost(self, dth:float, dollarsPerDTH = 6.53535):\n\t\treturn dth * dollarsPerDTH", "def buy_cost(self):\n return self._manager.get_buy_price(self.name)", "def price(temp):\n now = datetime.datetime.now()\n r = requests.get(\"https://bitcoin.co.th/\")\n soup = BeautifulSoup(r.content, \"html.parser\")\n data = soup.find_all(\"div\", {\"class\": \"price\"})\n print(\"[%02i:%02i:%02i] Now BTC Price : \" % (now.hour, now.minute, now.second), end=\"\")\n for i in range(len(data)):\n price = (data[i].text)\n print(price)\n if price != temp: # Price Change\n line_sent(price)\n temp = price\n time.sleep(30) # Delay 30 second\n main(temp) # call function main for loop", "def field_buy(self, symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def buy_one_cent_less_than_bid_or_50(self, bid_price):\n if bid_price:\n buying_price = self.buy_fixed_quantity_less_than_bid_price(\n bid_price=bid_price,\n fixed_quantity=0.01)\n else:\n buying_price = self.buy_fixed_price(50)\n return buying_price", "def get_sp500():\n sp500 = si.get_live_price(\"^GSPC\")\n sp500_trim = \"%.2f\" % sp500\n\n _time = datetime.datetime.now().timetuple()\n _time = time.mktime(tuple(_time))\n _time_label = f\"test\"\n\n return float(sp500_trim), int(_time)", "def usdToBtc(dollar, bitcoin):\n global btc\n global usd\n if usd>dollar:\n 
usd-=dollar\n btc+=bitcoin\n return True\n return False", "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n if supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "def get_base_price(self):\n base_price = random.randint(5,9)\n print(base_price)\n\n # see if the order was placed during rush hour\n now = datetime.datetime.now()\n\n dow = now.weekday() # Mon is 0, Sun is 6\n hour = now.hour\n\n if hour >= 8 and hour < 11 and dow >= 0 and dow < 5:\n base_price += 4\n\n return base_price", "def purchase_price(self):\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return 10000 - (.10 * self.miles)", "def gbm(price: float,\n mu: float,\n sigma: float,\n dt: float,\n n: int) -> np.array:\n y = np.exp((mu - sigma ** 2 / 2) * dt + sigma * np.random.normal(0, np.sqrt(dt), size=n).T)\n y = price * y.cumprod(axis=0)\n return y", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n 
base_price = (base_price * (100 - self.planet.system_size)) / 100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def price(self) -> float:\n return self.close", "def tick(price, tick_size=0.05):\n return round(price / tick_size)*tick_size", "def INVITE_COST(sent, isNonProfit=False):\n cost = 0\n if sent > 100:\n cost = 500 # $5\n if sent > 500:\n cost = 1000 # $10\n if sent > 1000:\n cost = 1500 # $15\n if sent > 2000:\n cost = 2000 # $20\n if sent > 10000:\n cost = 2500 # $25\n if isNonProfit:\n cost = cost * .75\n return int(round(cost))", "def get_base_price(self):\n\n price = randint(5, 9)\n\n now = datetime.now()\n weekday = now.weekday()\n hour = now.hour\n\n if weekday < 5 and 7 < hour < 12:\n price = price + 4\n\n return price", "def buy_and_pay(self):\n return self.price", "def buy_fixed_price(self, buying_price):\n\n print(f\"Ingresando orden a ${buying_price:,.2f}\".replace('.', ','))\n pyRofex.send_order(\n ticker=self.symbol,\n side=pyRofex.Side.BUY,\n price=buying_price,\n size=1,\n order_type=pyRofex.OrderType.LIMIT\n )\n return buying_price", "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def trading_cost(self) -> float:\n return self.__trading_cost", "def get_used_balance():\n try:\n if CONF.exchange == 'bitmex':\n position = EXCHANGE.private_get_position()\n if not position:\n return None\n return position[0]['currentQty']\n if CONF.exchange == 'kraken':\n result = EXCHANGE.private_post_tradebalance()['result']\n return round(float(result['e']) - float(result['mf']))\n if CONF.exchange == 'liquid':\n return round(get_crypto_balance()['used'] * get_current_price())\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_used_balance()", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)" ]
[ "0.6735869", "0.6266725", "0.61613244", "0.6135455", "0.6012119", "0.5998848", "0.5924012", "0.5846881", "0.58299065", "0.58050394", "0.5770253", "0.5769695", "0.576529", "0.5724111", "0.57105625", "0.5661389", "0.563068", "0.56113887", "0.560933", "0.5585725", "0.5583436", "0.5555005", "0.5533116", "0.55289125", "0.55273867", "0.5526039", "0.5513316", "0.5493882", "0.54899186", "0.5470674" ]
0.7985306
0
Instantiate and run the worker.
def main() -> None:
    worker = Worker()
    worker.do_work()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_and_run_worker(self):\n\n # Run processing on QThread worker - prevents GUI lock up\n # Create processing object, map control data\n processing_hub = ProcessingHub(control=self.control)\n\n # Create worker thread, connect signals to methods in this class and start, which calls worker.run()\n self.worker = ProcessingWorker(processing_hub, parent=self)\n self.worker.signal_screening_output_to_gui.connect(self.set_screening_output_to_gui)\n self.worker.signal_error.connect(self.error)\n self.worker.start()", "def start(self):\n if not self._worker:\n # the worker might be already created in case of deserialization\n self._worker = APIWorker(self.queue)\n self._worker.start()", "def worker(ctx_obj):\n execute(start_worker_command(settings=ctx_obj['settings']))", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def run(self, worker, evaluator=None):\n pass", "def worker(self, **options):\n pass", "def run(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)\n tasks = []\n self.threads[0] = Worker(self, 0, self.options, self.logger,\n self.queue, self.storage, self.parser, self.addToQueue, role=1)\n tasks.append(self.threads[0].begin())\n for thread in range(1, self.max_threads):\n # Spawn and start the threads\n self.threads[thread] = Worker(self, thread, self.options, self.logger,\n self.queue, self.storage, self.parser, self.addToQueue)\n tasks.append(self.threads[thread].begin())\n self.loop.run_until_complete(asyncio.gather(*tasks))", "def run(self):\n self.submit()\n self.start()", "def _worker(self, args):\n pass", "def create(self):\n return self.start()", "def __init__(self, run, expname):\n logger.debug('Initializing worker {}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return", "def _run(self):\n self._worker = _stash.runtime.run(\n input_=self.cmd,\n final_ins=self._sp_stdin,\n final_outs=self._sp_stdout,\n final_errs=self._sp_stderr,\n add_to_history=None,\n persistent_level=2,\n is_background=False,\n cwd=self._cwd,\n environ=self._environ\n )\n self.pid = self._worker.job_id", "def run(self):\n self.run()", "def start(self):\r\n thread = threading.Thread(target=self.run)\r\n try:\r\n thread.start()\r\n except RuntimeError as e:\r\n raise SchedulerError(f\"Failed to start worker '{self.WORKER_ID}': \" + str(e))", "def run(self):\n run_simple(self.hostname, self.port, self.dispatch,\n use_reloader=self.debug)", "def create_worker(context=None):\n return BasicWorker(context)", "def run(self):\n self.log.info(\"Starting thread: \" + self.name)\n self.object__ = self.run_process(self.object__, self.args)", "def run(self):\n self.initialize()\n\n self.engine = setup_db_connection(driver=\"Fake\")\n self.logger = multiprocessing.get_logger()\n self.logger.handlers[0] = setup_logging()\n\n self.logger.debug(\"\\n\\n\")\n self.logger.debug(f'Spawning Worker')\n self.logger.debug(\"\\n\\n\")\n\n self.time_start_process = time.time()\n self.time_start_cycle = time.time()\n\n # 
-------------------------------\n # Start Processing Data\n\n\n data_unprocessed = self.get_data_from_queue()\n\n df = pd.DataFrame()\n\n df = self.process_data(data_unprocessed)\n\n if not df.empty:\n self.insert_data_into_database(df)\n\n # -------------------------------\n\n self.check_status(\"COMPLETED\")\n return", "def __init__(self, worker):\n self._worker = worker\n self._jobs = Queue()\n self._results, self._errors = [], []\n self._jobfinished = Condition()", "def getWorker(self):\n pass", "def run_worker(self):\n\n # exec(open('restarter.py').read())\n # sys.exit()\n self.update_session_state()\n currentTime = QTime().currentTime()\n fromTime = QTime(int(self.settings.TECHFROMHOUR), int(self.settings.TECHFROMMIN))\n toTime = QTime(int(self.settings.TECHTOHOUR), int(self.settings.TECHTOMIN))\n sessionState = self.lblMarket.text()\n\n if fromTime < currentTime < toTime:\n print(\"Worker skept-Technical break : \", fromTime.toString(\"hh:mm\"), \" to \", toTime.toString(\"hh:mm\"))\n self.update_console(\"Technical break untill \" + toTime.toString(\"hh:mm\"))\n\n else:\n self.update_console(\"Starting Worker- UI Paused\")\n self.uiTimer.stop() # to not cause an errors when lists will be resetted\n worker = Worker(\n self.ibkrworker.process_positions_candidates) # Any other args, kwargs are passed to the run function\n worker.signals.result.connect(self.update_ui)\n worker.signals.status.connect(self.update_status)\n worker.signals.notification.connect(self.update_console)\n # Execute\n self.threadpool.start(worker)", "def run (self):\n t = threading.Thread(target=self.runController)\n t.start()", "def run(self):\n self.connect()\n self.run_forever()", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def run_worker():\n listen = ['default']\n conn = Redis(host=app.config['RQ_DEFAULT_HOST'],\n port=app.config['RQ_DEFAULT_PORT'],\n db=0,\n password=app.config['RQ_DEFAULT_PASSWORD'])\n\n with Connection(conn):\n worker = Worker(map(Queue, listen))\n worker.work()", "def run(self):\n self.class_inst_obj.processor(self.msg)", "def run(self):\n self.started()", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass" ]
[ "0.7279886", "0.7101314", "0.70331264", "0.7012368", "0.70091134", "0.69631004", "0.6924996", "0.68743694", "0.6871539", "0.6871292", "0.68372667", "0.6789902", "0.67830294", "0.6669269", "0.6668853", "0.6661168", "0.6640133", "0.6624162", "0.6593175", "0.6540589", "0.6489322", "0.6478791", "0.64734346", "0.64396524", "0.6408806", "0.6343388", "0.6324077", "0.63200325", "0.63200325", "0.63200325" ]
0.7226993
1
Return the cosmology that is being used
def get_cosmology(cosmology=conf.cosmology):
    if cosmology.lower() not in available_cosmologies:
        raise ValueError(
            "Unrecognised cosmology {}. Available cosmologies are {}".format(
                cosmology, ", ".join(available_cosmologies)
            )
        )
    elif cosmology.lower() in _astropy_cosmologies:
        ind = [
            num for num, name in enumerate(_astropy_cosmologies) if
            name == cosmology.lower()
        ][0]
        return getattr(cosmo, list(parameters.available)[ind])
    elif cosmology.lower() == "planck15_lal":
        return Planck15_lal_cosmology()
    elif "_with_riess2019_h0" in cosmology.lower():
        base_cosmology = cosmology.lower().split("_with_riess2019_h0")[0]
        return Riess2019_H0_cosmology(base_cosmology)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_cos_dscp(self):\n return self.__cos_dscp", "def set_cosmology(self, cosmo):\n self.cosmo = cosmo\n self.h70 = cosmo['h'] # Hubble parameter, H0 = 100h km/s/Mpc\n self.Om = cosmo['omega_M_0'] # Omega_matter\n self.Ol = cosmo['omega_lambda_0'] # Omega_Lambda", "def _get_cosmo(self, *args):\n with open(self.filename) as f:\n for l in f:\n if l.startswith(\"#a\"):\n self.cosmo_a = float(l.split('=')[-1])\n if l.startswith(\"#O\"):\n self.cosmo_h = float(l.split(';')[-1].split('=')[-1])", "def get_coml_s(hyplo):\r\n\tres=\"\"\r\n\tfor x in hyplo:\r\n\t\tif x==\"1\":\r\n\t\t\tres+=\"0\"\r\n\t\telse:\r\n\t\t \tres+=\"1\"\r\n\treturn res", "def _get_dscp_cos(self):\n return self.__dscp_cos", "def get_cosmology_from_name(cosmology):\n\n # This list should be updated when astropy releases the Planck18 cosmology\n available_cosmologies = {\n \"WMAP5\": acosmo.WMAP5,\n \"WMAP7\": acosmo.WMAP7,\n \"WMAP9\": acosmo.WMAP9,\n \"Planck13\": acosmo.Planck13,\n \"Planck15\": acosmo.Planck15,\n }\n\n # If the user uses a string for the cosmology look it up in the dict.\n # If they specify a cosmology class, use that instead.\n if isinstance(cosmology, str):\n if cosmology in available_cosmologies.keys():\n cosmo = available_cosmologies[cosmology]\n else:\n msg = (f\"\"\"The cosmology '{cosmology}' is not in the list of\n available cosmologies with string keywords. The list\n if available cosmologies accessable via keyword are:\n {available_cosmologies.keys()}\"\"\")\n raise ValueError(msg)\n\n elif isinstance(cosmology, acosmo.core.FLRW):\n cosmo = cosmology\n\n return cosmo", "def Seljak04_Cosmo(self,dc,nu):\n mass_non_linear = (np.argmin((self.sigmaM-dc)**2.).to(self.Msunh)).value\n Mh = (self.M.to(self.Msunh)).value\n x = Mh/self.mass_non_linear\n if len(self.bias_par.keys()) == 0:\n a = 0.53\n b = 0.39\n c = 0.45\n d = 0.13\n e = 40.\n f = 5e-4\n g = 1.5\n a1 = 0.4\n a2 = 0.3\n a3 = 0.8\n else:\n a = self.bias_par['a']\n b = self.bias_par['b']\n c = self.bias_par['c']\n d = self.bias_par['d']\n e = self.bias_par['e']\n f = self.bias_par['f']\n g = self.bias_par['g']\n a1 = self.bias_par['a1']\n a2 = self.bias_par['a2']\n a3 = self.bias_par['a3']\n if self.cosmo_code == 'camb':\n Om0m = self.camb_pars.omegam\n ns = self.cosmo_input_camb['ns']\n s8 = self.cosmo.get_sigma8_0()\n nrun = self.cosmo_input_camb['nrun']\n else:\n Om0m = self.cosmo.Omega0_m()\n ns = self.cosmo.n_s()\n s8 = self.cosmo.sigma8()\n try:\n nrun = self.cosmo_input_class['alpha_s']\n except:\n nrun = 0.\n return a + b*x**c + d/(e*x+1.) + f*x**g + np.log10(x)* \\\n (a1*(Om0m - 0.3 + ns - 1.) 
+ \\\n a2*(self.s8-0.9 + self.hubble - 0.7) + a4*nrun)", "def covariates(self):\n return None", "def _get_cofm(self, num, base):\n try:\n #Use saved sightlines if we have them.\n return (self.cofm, self.axis)\n except AttributeError:\n #Otherwise get sightlines at random positions\n #Re-seed for repeatability\n np.random.seed(23)\n box = _get_header_attr_from_snap(\"BoxSize\", num, base)\n #All through y axis\n axis = np.ones(self.NumLos)\n cofm = box*np.random.random_sample((self.NumLos,3))\n return cofm, axis", "def get_cosmo(fname):\n grep_cosmo = Popen(['grep', '^#Omega_', str(fname)], stdout=PIPE)\n grep_box = Popen(['grep', '^#Full box', str(fname)], stdout=PIPE)\n cosmo_str = (grep_cosmo.communicate()[0]\n .decode(\"utf-8\")\n .strip(\"#\\n\")\n .split(\"; \"))\n box_str = (grep_box.communicate()[0]\n .decode(\"utf-8\")\n .strip(\"#\\n\")\n .split(\" = \"))\n cosmo = {i.split(' = ')[0]: float(i.split(' = ')[1]) for i in cosmo_str}\n cosmo['Box_size_Mpc/h'] = float(box_str[1].split()[0])\n return cosmo", "def get_coulomb_info(self):\n return", "def Planck15_lal_cosmology():\n return cosmo.LambdaCDM(H0=67.90, Om0=0.3065, Ode0=0.6935)", "def nonflatcosmo(self):\n return LambdaCDM(70, 0.4, 0.8)", "def CIS(self):\n return self.get_class_average(self.CIS_class_level)", "def set_cosmo(self,astropycosmo):\n if \"astropy\" not in astropycosmo.__module__:\n raise ValueError(\"'astropycosmo' must be an astropy cosmology object\")\n \n self._side_properties[\"cosmology\"] = astropycosmo\n self._update_distance_()", "def getStoichiometryMath(self, *args):\n return _libsbml.SpeciesReference_getStoichiometryMath(self, *args)", "def init_physical(\n ombh2=0.022161, omch2=0.11889, H0=67.77, omkh2=0.0, t0=2.726, nnu=3.046\n ):\n h = H0 / 100.0\n\n c = Cosmology()\n\n c.omega_b = ombh2 / h ** 2\n c.omega_c = omch2 / h ** 2\n c.H0 = H0\n\n rhoc = 3.0 * c.H() ** 2 * c_sl ** 2 / (8.0 * math.pi * G_n)\n rhorad = a_rad * t0 ** 4\n c.omega_g = rhorad / rhoc\n\n rhonu = nnu * rhorad * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)\n c.omega_n = rhonu / rhoc\n\n c.omega_l = 1.0 - (omkh2 + ombh2 + omch2) / h ** 2 - (c.omega_g + c.omega_n)\n\n return c", "def createStoichiometryMath(self):\n return _libsbml.SpeciesReference_createStoichiometryMath(self)", "def cole_coeff(self):\n return self.diseq_coeff(standardize=True)", "def get_cosin_sim(question, contexts):\r\n cos_sim_for_question = []\r\n for context in contexts :\r\n cv = CountVectorizer(stop_words=MY_STOPWORDS, lowercase=False)\r\n matrix = cv.fit_transform(pd.DataFrame([question, context])[0]).toarray()\r\n cos_sim = dot(matrix[0], matrix[1])/(norm(matrix[0])*norm(matrix[1]))\r\n cos_sim_for_question.append(cos_sim)\r\n return pd.Series(cos_sim_for_question)", "def matthewscc(self):\n if not self.total_examples:\n return 0.\n\n true_pos = float(self.true_positives)\n false_pos = float(self.false_positives)\n false_neg = float(self.false_negatives)\n true_neg = float(self.true_negatives)\n terms = [(true_pos + false_pos),\n (true_pos + false_neg),\n (true_neg + false_pos),\n (true_neg + false_neg)]\n denom = 1.\n for t in filter(lambda t: t != 0., terms):\n denom *= t\n return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)", "def _get_traffic_class_cos(self):\n return self.__traffic_class_cos", "def mychem_info(self):\n return self._mychem_info", "def calc_coherence(model, corpus):\n cm = CoherenceModel(model=model, corpus=corpus, coherence='u_mass')\n coherence = cm.get_coherence()\n print(timestamp(),\"Topic coherence:\", coherence)", 
"def concentrations(self):\n return self.quantities/self.volume", "def cosmo(self):\n return self.cls(*self.cls_args, **self.cls_kwargs)", "def coherence(self):\r\n\r\n #XXX Calculate this from the standard output, instead of recalculating\r\n #the coherence:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n coherence = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n coherence[i][j] = tsa.coherence_spec(self.spectrum[i][j],\r\n self.spectrum[i][i],\r\n self.spectrum[j][j])\r\n\r\n idx = tril_indices(tseries_length, -1)\r\n coherence[idx[0], idx[1], ...] = coherence[idx[1], idx[0], ...].conj()\r\n\r\n return coherence", "def getStoichiometry(self):\n return _libsbml.SpeciesReference_getStoichiometry(self)", "def from_mypackage(mycosmo):\n # Cosmology provides a nice method \"mapping\", so all that needs to\n # be done here is create a dictionary of the parameters\n mapping = {}\n mapping[\"H0\"] = mycosmo.hubble_parameter\n mapping[\"Om0\"] = mycosmo.Omega_matter_initial\n ... # keep building mapping\n\n return Cosmology.from_format(\n mapping, format=\"mapping\", move_to_meta=True\n ) # extra info -> meta", "def getMath(self):\n return _libsbml.StoichiometryMath_getMath(self)" ]
[ "0.6441433", "0.64167005", "0.6164821", "0.6161581", "0.6125467", "0.60074854", "0.59982276", "0.5963294", "0.5915752", "0.58894485", "0.5884741", "0.57696545", "0.5693378", "0.5673406", "0.5641914", "0.56051314", "0.56012136", "0.55621797", "0.55592734", "0.55408865", "0.5471717", "0.544486", "0.54382503", "0.5423644", "0.5420825", "0.5414656", "0.54142207", "0.5402984", "0.53972864", "0.5392122" ]
0.7251539
0
Return the Planck15 cosmology coded up in lalsuite
def Planck15_lal_cosmology():
    return cosmo.LambdaCDM(H0=67.90, Om0=0.3065, Ode0=0.6935)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cie_lab(self):\n K = Fraction(1, 3) * Fraction(29, 6) ** 2\n e = Fraction(6, 29) ** 3\n x, y, z = (n / m for n, m in zip(self.cie_xyz, D65))\n fx, fy, fz = (\n n ** Fraction(1, 3) if n > e else K * n + Fraction(4, 29)\n for n in (x, y, z)\n )\n return (116 * fy - 16, 500 * (fx - fy), 200 * (fy - fz))", "def get_cosmology(cosmology=conf.cosmology):\n if cosmology.lower() not in available_cosmologies:\n raise ValueError(\n \"Unrecognised cosmology {}. Available cosmologies are {}\".format(\n cosmology, \", \".join(available_cosmologies)\n )\n )\n elif cosmology.lower() in _astropy_cosmologies:\n ind = [\n num for num, name in enumerate(_astropy_cosmologies) if\n name == cosmology.lower()\n ][0]\n return getattr(cosmo, list(parameters.available)[ind])\n elif cosmology.lower() == \"planck15_lal\":\n return Planck15_lal_cosmology()\n elif \"_with_riess2019_h0\" in cosmology.lower():\n base_cosmology = cosmology.lower().split(\"_with_riess2019_h0\")[0]\n return Riess2019_H0_cosmology(base_cosmology)", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def get_coml_s(hyplo):\r\n\tres=\"\"\r\n\tfor x in hyplo:\r\n\t\tif x==\"1\":\r\n\t\t\tres+=\"0\"\r\n\t\telse:\r\n\t\t \tres+=\"1\"\r\n\treturn res", "def printPolyCoeffs(lam) :\n ell = len(lam)\n useFormat = \"2.6e\"\n count = 0\n def printLine(s, count) :\n if lam[count] < 0 :\n s = s + 3 * \" \"\n else :\n s = s + 4 * \" \"\n s = s + \"{0:\" + useFormat + \"}\"\n print(s . format(lam[count]))\n count = count + 1\n return count\n if ell >= 1 :\n count = printLine(\"x0y0\", count)\n if ell >= 3 :\n count = printLine(\"x1y0\", count)\n count = printLine(\"x0y1\", count)\n if ell >= 6 :\n count = printLine(\"x2y0\", count)\n count = printLine(\"x1y1\", count)\n count = printLine(\"x0y2\", count)\n if ell >= 10 :\n count = printLine(\"x3y0\", count)\n count = printLine(\"x2y1\", count)\n count = printLine(\"x1y2\", count)\n count = printLine(\"x0y3\", count)\n if ell >= 15 :\n count = printLine(\"x4y0\", count)\n count = printLine(\"x3y1\", count)\n count = printLine(\"x2y2\", count)\n count = printLine(\"x1y3\", count)\n count = printLine(\"x0y4\", count)\n if ell >= 21 :\n count = printLine(\"x5y0\", count)\n count = printLine(\"x4y1\", count)\n count = printLine(\"x3y2\", count)\n count = printLine(\"x2y3\", count)\n count = printLine(\"x1y4\", count)\n count = printLine(\"x0y5\", count)\n if ell >= 28 :\n count = printLine(\"x6y0\", count)\n count = printLine(\"x5y1\", count)\n count = printLine(\"x4y2\", count)\n count = printLine(\"x3y3\", count)\n count = printLine(\"x2y4\", count)\n count = printLine(\"x1y5\", count)\n count = printLine(\"x0y6\", count)\n if ell >= 36 :\n count = printLine(\"x7y0\", count)\n count = printLine(\"x6y1\", count)\n count = printLine(\"x5y2\", count)\n count = printLine(\"x4y3\", count)\n count = printLine(\"x3y4\", count)\n count = printLine(\"x2y5\", count)\n count = printLine(\"x1y6\", count)\n count = printLine(\"x0y7\", count)\n if (ell > 36) or (ell < 1) :\n raise ValueError(\"Polynomial degree less than or equal to 7, please.\")", "def bmad_linac_phasing_lines(epics):\n lines = [\n '! Linac overall phasing',\n 'O_L1[phase_deg] = 0 ! K21_1 sets this directly. 
This is a delta on top of that.', \n 'O_L2[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:CALC204')),\n 'O_L3[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:AO499'))\n ]\n return lines", "def get_answer(self, syl):\n \n if (self.figure_counter[syl[2]] != 0) and (np.random.random() < self.figure_nvc[syl[2]] / self.figure_counter[syl[2]]):\n answer = \"NVC\"\n else:\n rep = self.get_representation(syl)\n concl_quant = self.determine_quantifier(rep)\n quant = list(self.representation.keys())[list(self.representation.values()).index(concl_quant)]\n if self.figure_concl[syl[2]] > 0:\n order = \"ac\"\n elif self.figure_concl[syl[2]] < 0:\n order = \"ca\"\n else:\n order = np.random.choice([\"ac\", \"ca\"])\n answer = quant + order\n \n return answer", "def license_plate(self) -> str:\n temp = re.sub(\n r\"\\?\",\n lambda x: self.random_element(self.ascii_uppercase_azerbaijan),\n self.random_element(self.license_formats),\n )\n temp = temp.replace(\"##\", self.random_element(self.license_plate_initial_numbers), 1)\n # temp = temp.format(self.random_element(range(1, 999)))\n return self.numerify(temp)", "def css(ax, col, legend):\n d = Planck15.luminosity_distance(z=0.034).cgs.value\n\n # low frequency\n nu = 6E9\n\n # add the points from Deanne's paper\n x = np.array([69, 99, 162, 357])\n y = np.array([4.5, 6.1, 2.3, 0.07])*nu\n lum = plot_line(\n ax, d, x, y,\n 'AT2018cow', None, col, legend, zorder=10)\n print(lum)\n ax.text(x[0], lum[0]*1.1, 'CSS161010', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='right')", "def calico_kitty():\n return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]", "def chelsea():\n from skimage import data\n\n return data.chelsea()", "def showCl(ell,temps,title='CAMB ISWout power spectrum'):\n plt.plot(ell,temps*ell*(ell+1)/(2*np.pi) *1e12) #1e12 to convert to microK**2\n plt.xlabel('multipole moment l')\n plt.ylabel('l(l+1)C_l/(2pi) [microK**2]')\n plt.title(title)\n plt.show()", "def lookup_Pk(cosmology='planck',nonlinear=0):\n\n # k in h/Mpc\n k = N.logspace(-4., 3., 3*1024)\n\n if nonlinear==1:\n hf = 'halofit'\n saveto = 'data_itam/'+cosmology+'_pk.txt'\n\n else:\n hf = ''\n saveto = 'data_itam/'+cosmology+'_pk_linear.txt'\n\n if cosmology == 'planck':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 0.96,\n 'h': 0.7,\n 'omega_b': 0.0225,\n 'Omega_cdm': 0.25,\n }\n sig8_0 = 0.8\n\n\n elif cosmology == 'wmap':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 0.967,\n 'h': 0.704,\n 'omega_b': 0.02253,\n 'Omega_cdm': 0.226,\n }\n sig8_0 = 0.81\n\n\n elif cosmology == 'ML':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 1.,\n 'h': 0.73,\n 'omega_b': 0.045*0.73**2,\n 'Omega_cdm': 0.25-0.045,\n }\n sig8_0 = 0.9\n\n else:\n raise ValueError(\"the cosmology you chose does not exist\")\n\n cosmoClass_nl = Class()\n cosmoClass_nl.set(class_params)\n cosmoClass_nl.compute()\n\n # rescale the normalization of matter power spectrum to have sig8=0.8 today\n sig8 = cosmoClass_nl.sigma8()\n A_s = cosmoClass_nl.pars['A_s']\n cosmoClass_nl.struct_cleanup() # does not clean the input class_params, cosmo.empty() does that\n cosmoClass_nl.set(A_s=A_s*(sig8_0*1./sig8)**2)\n cosmoClass_nl.compute()\n\n h = cosmoClass_nl.pars['h']\n pk_nl = N.asarray([ cosmoClass_nl.pk(x*h, 0.,)*h**3 
for x in k ])\n\n kpk = N.vstack((k,pk_nl))\n \n N.savetxt(saveto,kpk)\n print('saving', saveto )\n return", "def at2018cow(ax, col, legend):\n d = Planck15.luminosity_distance(z=0.014).cgs.value\n\n # high frequency\n a, b, c = sma_lc()\n dt, f, ef = b\n ef_comb = np.sqrt(ef**2 + (0.15*f)**2)\n nu = 231.5E9\n\n # low frequency\n nu = 9E9\n data_dir = \"/Users/annaho/Dropbox/Projects/Research/AT2018cow/data\"\n dat = Table.read(\n \"%s/radio_lc.dat\" %data_dir, delimiter=\"&\",\n format='ascii.no_header')\n tel = np.array(dat['col2'])\n choose = np.logical_or(tel == 'SMA', tel == 'ATCA')\n\n days = np.array(dat['col1'][choose])\n freq = np.array(dat['col3'][choose]).astype(float)\n flux_raw = np.array(dat['col4'][choose])\n flux = np.array(\n [float(val.split(\"pm\")[0][1:]) for val in flux_raw])\n eflux_sys = np.array([0.1*f for f in flux])\n eflux_form = np.array(\n [float(val.split(\"pm\")[1][0:-1]) for val in flux_raw])\n eflux = np.sqrt(eflux_sys**2 + eflux_form**2)\n choose = freq == 9\n\n # add the Margutti point and the Bietenholz point\n margutti_x = np.array([84,287])\n margutti_y = np.array([6E28, 3.2E26])/(4*np.pi*d**2)/1E-23/1E-3\n x = np.hstack((days[choose], margutti_x))\n y = np.hstack((flux[choose], margutti_y)) * nu\n lum = plot_line(\n ax, d, x, y,\n 'AT2018cow', None, col, legend, zorder=10)\n ax.text(x[0], lum[0]/1.4, 'AT2018cow', fontsize=11,\n verticalalignment='top',\n horizontalalignment='center')", "def generate_l1ca_codes(self, prn):\n output_taps = self.l1_code_phase_assignments.loc[prn, 'CA_Phase_Select']\n g1 = self.generate_mls(10, self.g1_feedback_taps, [10])\n g2 = self.generate_mls(10, self.g2_feedback_taps, output_taps)\n ca_code = []\n for index, bit in enumerate(g1):\n ca_code.append(int((bit + g2[index]) % 2))\n return ca_code", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def generate_symbole(figure_name = \"canon\"):\n if figure_name == \"planeur\": #PLANNEUR\n planneur = np.zeros((3, 3))\n planneur[1, 0] = 1\n planneur[0, 1] = 1\n planneur[0, 2] = 1\n planneur[1, 2] = 1\n planneur[2, 2] = 1\n return planneur\n\n elif figure_name == \"canon\": #CANON\n canon = np.zeros((36,9))\n canon[0:2,5:7] = 1\n canon[11,4:7] = 1\n canon[15:17,4:7] = 1\n canon[12,3] = 1\n canon[14,3] = 1\n canon[13,2] = 1\n canon[12,7] = 1\n canon[14,7] = 1\n canon[13,8] = 1\n canon[25,0:2] = 1\n canon[22:25,1:3] = 1\n canon[21,2:5] = 1\n canon[24,3] = 1\n canon[22:25,4:6] = 1\n canon[25,5:7] = 1\n canon[30,1:3] = 1\n canon[34:36,3:5] = 1\n return canon\n\n elif figure_name == \"blinker\": #BLINKER\n blinker = np.ones((3,1))\n return blinker\n\n elif figure_name == \"oscillator_alone\":\n osc = np.zeros((11,11))\n osc[2,2:9] = 1\n osc[8,2:9] = 1\n osc[2:9,2] = 1\n osc[2:9,8] = 1\n osc[5,2] = 0\n osc[5,8] = 0\n osc[2,5] = 0\n osc[8,5] = 0\n osc[0,5] = 1\n osc[10,5] = 1\n osc[5,0] = 1\n osc[5,10] = 1\n osc[1,4:7] = 1\n osc[9,4:7] = 1\n osc[4:7,1] = 1\n osc[4:7,9] = 1\n return osc\n\n elif figure_name == \"oscillator_one_block\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2,-2:] = 1\n return osc\n\n elif figure_name == \"oscillator_four_blocks\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2, -2:] = 1\n osc[0:2,0:2] = 1\n osc[-2:,0:2] = 1\n osc[-2:,-2:] = 1\n return osc\n\n elif figure_name == \"croix\":\n return osc\n\n elif figure_name == \"diag\":\n return osc\n\n elif figure_name == \"octogone\":\n return osc\n\n else:\n return 
0", "def get_info(self):\n return \"Malayalam Stemmer(Experimental)\"", "def grb030329(ax, col, legend):\n z = 0.1686\n d = Planck15.luminosity_distance(z=z).cgs.value\n\n # LOW FREQUENCY\n\n # Berger: this is the best frequency to pick from this paper\n t = np.array(\n [0.58, 1.05, 2.65, 3.57, 4.76, 6.89, 7.68, 9.49, 11.90, \n 12.69, 14.87, 16.66, 18.72, 20.58, 25.70, 28.44, 31.51, \n 33.58, 36.52, 42.55, 44.55, 59.55, 66.53]) / (1+z)\n f = np.array(\n [3.50, 1.98, 8.50, 6.11, 9.68, 15.56, 12.55, 13.58, 17.70, \n 17.28, 19.15, 17.77, 15.92, 16.08, 15.34, 12.67, 13.55, \n 13.10, 10.64, 8.04, 8.68, 4.48, 4.92])\n nu = np.array([8.5E9]*len(f))\n\n # Van der Horst: best frequency is 2.3 GHz\n t = np.append(t, np.array([268.577, 306.753, 365.524, 420.168, 462.078, \n 583.683, 743.892, 984.163]) / (1+z))\n f = np.append(\n f, np.array([1613, 1389, 871, 933, 707, 543, 504, 318]) * 1E-3)\n nu = np.append(nu, np.array([2.3E9]*8))\n lum = plot_line(ax, d, t, nu*f, 'GRB030329', 'GRB', col, legend)\n ax.text(t[6]*1.05, lum[10]*1.05, 'GRB030329', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='left')", "def test_lightcurve_seismology_plot():\n KeplerLightCurveFile(TABBY_Q8).PDCSAP_FLUX.periodogram().plot()", "def main():\n station = \"Merikannontie\"\n coefs, score = cycling_weather_linregr(station)\n print(f\"Measuring station: {station}\")\n print(\n f\"Regression coefficient for variable 'precipitation': {coefs[0]:.1f}\")\n print(f\"Regression coefficient for variable 'snow depth': {coefs[1]:.1f}\")\n print(f\"Regression coefficient for variable 'temperature': {coefs[2]:.1f}\")\n print(f\"Score: {score:.2f}\")\n return", "def grb111209a(ax, col, legend):\n z = 0.677\n d = Planck15.luminosity_distance(z=z).cgs.value\n\n t = np.array([5.1])/(1+z)\n f = np.array([0.97])\n nu = np.array([9E9]*len(f))\n\n lum = plot_line(ax, d, t, nu*f, 'GRB111209A', 'GRB', col, legend)\n ax.text(t[0]*1.5, lum[0]*1.3, 'GRB111209A/SN2011kl', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='center')", "def info():\n return r\"\"\"Lin-Yu Tseng and Chun Chen, \"Multiple trajectory search for Large Scale Global Optimization,\" 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence), Hong Kong, 2008, pp. 3052-3059. doi: 10.1109/CEC.2008.4631210\"\"\"", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)", "def cie1931cmf(wavelength):\n if wavelength < 380 or wavelength > 780:\n return [0, 0, 0]\n index=int(round((wavelength-380)/5.0))*3\n return [_CIE1931[index+i] for i in range(3)]", "def test_get_tone_from_IBM():\n comments = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. 
I loved the plot line.\"]\n tone_info_dictionary = get_tone_from_IBM(comments[0])\n\n tones = get_columns_from_IBM_tone(tone_info_dictionary)\n print(tones)", "def lae_nccd(year):\n html = load_campdoc_html(year)\n table = extract_main_table_from_html(html)\n data = process_main_table(table)\n return(data)", "def getcolorcodeALA15(ramapath, N, ssize=5):\n\n from analyse_ala_15 import AngleCategorizer\n\n nResidues = 15\n #angles = np.loadtxt('rama_dataset_ala_15.xvg', skiprows=32, usecols=range(0, 2), delimiter=' ')\n angles = np.loadtxt(os.path.join(ramapath, 'rama_dataset_ala_15_1500.xvg'), skiprows=32, usecols=range(0, 2), delimiter=' ')\n nSamples = angles.shape[0]/15\n angles.resize(nSamples, nResidues, 2)\n angCat = AngleCategorizer(angles)\n angCat.categorize()\n angCat.countConfigurations()\n colInd = angCat.getColorMatrix()\n alphaInd = angCat.getAlphaVals()\n\n marker = list()\n patchlist = list()\n\n marker.append('o')\n marker.append('o')\n marker.append('o')\n\n import matplotlib.patches as mpatches\n patchlist.append(mpatches.Patch(color='black', label=r'$\\alpha$'))\n patchlist.append(mpatches.Patch(color='blue', label=r'$\\beta$-1'))\n patchlist.append(mpatches.Patch(color='red', label=r'$\\beta$-2'))\n\n alpha = plt.scatter(0, 1, c='k', marker=marker[0], s=ssize, label=r'$\\alpha$')\n beta1 = plt.scatter(0, 1, c='b', marker=marker[1], s=ssize, label=r'$\\beta\\textnormal{-}1$')\n beta2 = plt.scatter(0, 1, c='r', marker=marker[2], s=ssize, label=r'$\\beta\\textnormal{-}2$')\n plt.close()\n\n patchlist = [alpha, beta1, beta2]\n\n return colInd, marker, patchlist, alphaInd", "def _repr_(self):\n return (\"%d-d CPR-Fano toric variety covered by %d affine patches\"\n % (self.dimension_relative(), self.fan().ngenerating_cones()))" ]
[ "0.55920184", "0.549476", "0.54696536", "0.54056984", "0.53960073", "0.5380349", "0.53594655", "0.53576845", "0.53418833", "0.53027004", "0.5290028", "0.5287675", "0.5284653", "0.52330744", "0.51322997", "0.50801104", "0.5078625", "0.50296295", "0.50066954", "0.5004581", "0.49985564", "0.49900883", "0.498733", "0.49734536", "0.49734536", "0.49690753", "0.49663875", "0.49662814", "0.4957113", "0.4949705" ]
0.70417845
0
Return the base cosmology but with the Riess2019 H0 value. For details
def Riess2019_H0_cosmology(base_cosmology):
    _base_cosmology = get_cosmology(base_cosmology)
    return cosmo.LambdaCDM(
        H0=74.03, Om0=_base_cosmology.Om0, Ode0=_base_cosmology.Ode0
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sound_horizon_EH(self):\n om_m = self.omega_cb\n om_b = self.omega_b\n om_n = np.sum(self.omega_nu)\n h = self.h \n if self.M_nu_tot == 0.: rs = 44.5*np.log(9.83/om_m)/np.sqrt(1+10*om_b**0.75)*h\n else: rs = 55.154*np.exp(-72.3*(om_n+0.0006)**2.)/(om_m**0.25351*om_b**0.12807)*h\n return rs", "def init_physical(\n ombh2=0.022161, omch2=0.11889, H0=67.77, omkh2=0.0, t0=2.726, nnu=3.046\n ):\n h = H0 / 100.0\n\n c = Cosmology()\n\n c.omega_b = ombh2 / h ** 2\n c.omega_c = omch2 / h ** 2\n c.H0 = H0\n\n rhoc = 3.0 * c.H() ** 2 * c_sl ** 2 / (8.0 * math.pi * G_n)\n rhorad = a_rad * t0 ** 4\n c.omega_g = rhorad / rhoc\n\n rhonu = nnu * rhorad * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)\n c.omega_n = rhonu / rhoc\n\n c.omega_l = 1.0 - (omkh2 + ombh2 + omch2) / h ** 2 - (c.omega_g + c.omega_n)\n\n return c", "def getP0(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"P0\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tself.data[\"P0\"][i],energ,sound,gammaout,entropy,dummyfail = myhmag.gethelmholtzeos(1000.,self.data[\"rho\"][i],abar,zbar,True)", "def get_h0(self, t):\n return self.h0", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def h2o_from_rh_and_temp(RH, TEMP):\n TC = TEMP - 273.15\n frh = RH / 100.\n svp_millibar = 6.11 * 10**((7.5 * TC)/(TC+237.3))\n svp_pa = svp_millibar * 100\n vp_pa = svp_pa * frh\n molecule_per_cubic_m = vp_pa * Avogadro / R / TEMP\n molecule_per_cubic_cm = molecule_per_cubic_m * centi**3\n #print RH, TEMP, molecule_per_cubic_cm\n return molecule_per_cubic_cm", "def rhe(m):\n \n m = m*u.kg.to(u.M_sun)\n \n logr = np.full(m.shape,0)\n \n iless = np.where(m<=2.5)\n igreater = np.where(m>2.5)\n \n logr[iless] = 3.0965 - 2.013*np.log10(m[iless])\n logr[igreater] = 0.0557*(np.log10(m[igreater])-0.172)**-2.5\n return (10**logr)*u.Rsun.to(u.m)", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')", "def calculate_rh(self):\n # Check for existence of relative humidity and mixing ratio\n if self.data.get('Relative_Humidity') is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n # Convert mixing ratio to relative humidity\n sat_vapor = 6.11 * (10.0**((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] -\n sat_vapor))\n\n self.data['Relative_Humidity'] = ((self.data['Mixing_Ratio'] /\n sat_w) * 100.0)", "def _calculate_strehl(self):\n\n self.strehl = np.exp(-1*((2*np.pi/self.science_wavelength)*self.high_order_wfe)**2)", "def MH(self):\n\n #return math.log10(self.glb[user_params_index[\"Zs\"]]*constants.solar_x/(self.glb[user_params_index[\"Xs\"]]*constants.solar_z))\n return math.log10(self.glb[iz0]*constants.solar_x/(self.glb[ix0]*constants.solar_z))", "def get_h0(self, t):\n return self.h0 * np.sin(2 * np.pi * t / self.Pmod + self.Pmod_phi)", "def get_hcore1(mol, atom, coord):\n\n mf = scf.RHF(mol)\n g = grad.rhf.Gradients(mf)\n\n hcore1 = g.hcore_generator(mol)(atom)[coord]\n\n omega = np.identity(2)\n hcore1 = np.kron(omega, hcore1)\n\n return hcore1", "def rho0_c(self, c):\n return 200./3*self.rhoc*c**3/(np.log(1+c)-c/(1+c))", "def h_P(self, z0):\n # Get the governing variables\n (B, N, u_slip, u_inf) = 
self.get_variables(z0, 0.)\n \n # Compute U_N\n U_N = u_slip / (B * N)**(1./4.)\n \n # Compute the correlation equation\n return 5.2 * np.exp(-(U_N - 1.8)**2 / 10.24) * (B / N**3)**(1./4.)", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s-1*cm-2*AA-1')", "def OLSV2_HA(code):\n if len(code)==6:\n if (code[0]=='6')|(code[0]=='9'):\n code='SS_'+code\n _get_index_data('000001')\n index1='SS_000001'\n else:\n code='SZ_'+code\n _get_index_data('399001')\n index1='SZ_399001'\n elif len(code)==4:\n code='HK_'+code\n index1='HK_HSI'\n else:\n print('Input Wrong code.')\n\n pre_code='YAHOO/'\n ticker=pre_code+code\n index1=pre_code+index1\n\n fn='./Quandl/'+ticker+'.csv'\n ind='./Quandl/'+index1+'.csv'\n\n df=pd.read_csv(fn,parse_dates=True,index_col=0)\n dff1=df[['Open','High','Low','Close','Volume','Adjusted Close']].copy()\n print ('Caculating the analysing 1/4 statistics:')\n print (dff1.describe(),'\\n')\n dfi=pd.read_csv(ind,parse_dates=True,index_col=0)\n df['cpct']=df['Close'].pct_change()\n df['vpct']=df['Volume'].pct_change()\n dfi['indpct']=dfi['Close'].pct_change()\n\n rets=pd.concat([df['cpct'],dfi['indpct'],df['vpct']],axis=1)\n rets=rets.dropna(how='any')\n #print (rets)\n\n X=np.array(rets.iloc[:,1:3])\n X=sm.add_constant(X)\n #print(X)\n\n Y=np.array(rets.iloc[:,0])\n\n\n #y=np.dot(X,beta)+e\n model=sm.OLS(Y,X)\n results=model.fit()\n print (results.summary())\n \n print (\"The params for the model:\",results.params)\n print (\"The std for the model:\",results.bse)\n return results", "def noyes84_rossby_activity(logRpHK):\n y = 5 + logRpHK\n logRo = 0.324 - 0.400*y - 0.283 * y**2 - 1.325 * y**3\n return 10**logRo", "def ISA_trop(h):\n\tT = 288.15 - 0.0065*h;\n\tp = 101325*(T/288.15)**(-g/(-0.0065*287));\n\trho = 1.225*(T/288.15)**(-g/(-0.0065*287) - 1);\n\ta = np.sqrt(1.4*287*T);\n\treturn T, p, rho, a;", "def MAH_Hearin_2021(halo_mass_t0, cosmic_t):\r\n\r\n #U_a_early = 2.5\r\n #U_a_early_late = 0.3\r\n #log10tau_c = 1.25\r\n\r\n k = 3.5\r\n\r\n a_late_early = 2.5-0.3 #np.log( np.power(np.e, U_a_early_late) + 1. )\r\n a_early = 2.5 #np.log( np.power(np.e, U_a_early) + 1. )\r\n tau_c = 1.25 #np.power(10., log10tau_c)\r\n alpha = a_early + a_late_early / (1. + np.exp(-k*(cosmic_t - tau_c)) )\r\n\r\n MAH = np.log10( 10.**halo_mass_t0 * np.power(cosmic_t / Cosmo.age(0), alpha) )\r\n\r\n return MAH", "def rh(self, h):\n sez=self.getSect(h)\n area=self.area(sez)\n wetborder = self.wetBorder(sez)\n return area/wetborder", "def convert_H_kcalmol(en_H):\n return en_H/kcalmol_H", "def sidm_halo_model_default(r, N0, v0, ns0, sigma0, w0, log10M200, c200):\n\n op_sigma_model = \"velocity-dependent\"\n\n Msun_in_cgs = 1.98848e33\n kpc_in_cgs = 3.08567758e21\n\n t_age = 7.5 # Gyr - assuming constant halo age\n rho0 = find_rho0(N0, t_age, v0, ns0, sigma0, w0, op_sigma_model)\n t_age_cgs = t_age * 1e9 * 365.24 * 24 * 3600 # sec\n rho0_cgs = rho0 * Msun_in_cgs / kpc_in_cgs ** 3 # g/cm^3\n\n G = 4.3e-6 # kpc km^2 Msun^-1 s^-2\n r0 = v0**2 / (4. 
* np.pi * G * rho0)\n r0 = np.sqrt(r0) # kpc\n\n sol = fsolve(find_r1, 20, args=(rho0_cgs, v0, ns0, t_age_cgs, sigma0, w0, op_sigma_model))\n r1 = sol[0] * r0 # kpc\n\n rho = rho_joint_profiles(r, r1, r0, rho0, ns0, log10M200, c200)\n\n log10rho = np.log10(rho)\n\n return log10rho", "def haurwitz(zenith):\n\n # GHI = 1098 * cos(z) * exp(-0.057 / cos(z))\n clearsky_ghi = 1098.0 * np.cos(np.radians(zenith)) * np.exp(-0.057 / np.cos(np.radians(zenith)))\n\n # remove negative values\n clearsky_ghi[clearsky_ghi < 0] = 0\n\n return clearsky_ghi", "def _calc_Hc(self, signal):\n\n return 2.8 * np.nanstd(signal)", "def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)", "def clinopyroxene_92():\n\n rho = 3327.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 257.3; C[0,1] = 85.9; C[0,2] = 76.2; C[0,3] = 0.; C[0,4] = 7.1; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 216.2; C[1,2] = 71.8; C[1,3] = 0.; C[1,4] = 13.3; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 260.2; C[2,3] = 0.; C[2,4] = 33.7; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.2; C[3,4] = 0.; C[3,5] = 10.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 70.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 85.8\n\n return C, rho", "def _convert_rh2w(self):\n sat_vapor = 6.11 * (10.0 ** ((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] - sat_vapor))\n\n self.data['Mixing_Ratio'] = (\n self.data['Relative_Humidity'] / 100.0) * sat_w", "def _interpolate(self, omch2, h0):\n omch2_index = (\n 1.0\n * (self.CAMBGenerator.om_resolution - 1)\n * (omch2 - self.CAMBGenerator.omch2s[0])\n / (self.CAMBGenerator.omch2s[-1] - self.CAMBGenerator.omch2s[0])\n )\n\n if self.CAMBGenerator.h0_resolution == 1:\n h0_index = 0\n else:\n h0_index = (\n 1.0 * (self.CAMBGenerator.h0_resolution - 1) * (h0 - self.CAMBGenerator.h0s[0]) / (self.CAMBGenerator.h0s[-1] - self.CAMBGenerator.h0s[0])\n )\n\n x = omch2_index - np.floor(omch2_index)\n y = h0_index - np.floor(h0_index)\n\n data = self.data\n result = {}\n for key in data.keys():\n\n v1 = data[key][int(np.floor(omch2_index)), int(np.floor(h0_index))] # 00\n v2 = data[key][int(np.ceil(omch2_index)), int(np.floor(h0_index))] # 01\n\n if self.CAMBGenerator.h0_resolution == 1:\n result[key] = v1 * (1 - x) * (1 - y) + v2 * x * (1 - y)\n else:\n v3 = data[key][int(np.floor(omch2_index)), int(np.ceil(h0_index))] # 10\n v4 = data[key][int(np.ceil(omch2_index)), int(np.ceil(h0_index))] # 11\n result[key] = v1 * (1 - x) * (1 - y) + v2 * x * (1 - y) + v3 * y * (1 - x) + v4 * x * y\n return result" ]
[ "0.6274564", "0.6107101", "0.60727805", "0.58837694", "0.5814193", "0.5814193", "0.56165946", "0.55797195", "0.5553944", "0.5534817", "0.553009", "0.55183256", "0.5515757", "0.5511963", "0.5507393", "0.54924595", "0.5484746", "0.5456038", "0.5441649", "0.54290783", "0.5415065", "0.5408925", "0.5395768", "0.5391213", "0.53902936", "0.5379662", "0.53723", "0.53493166", "0.5347208", "0.5334433" ]
0.70496
0
Returns the supported components e.g. set(['mmic_autodock_vina',...]). Returns Set[str]
def tactic_comps(cls) -> Set[str]:
    return set(["mmic_autodock_vina"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_supported_components(self):\n props = [cdav.SupportedCalendarComponentSet()]\n response = self.get_properties(props, parse_response_xml=False)\n response_list = response.find_objects_and_props()\n prop = response_list[unquote(self.url.path)][\n cdav.SupportedCalendarComponentSet().tag\n ]\n return [supported.get(\"name\") for supported in prop]", "def get_supported_sets(self):\n return _SUPPORTED_SETS", "def supported_modes(self) -> Set[str]:\n raise NotImplementedError", "def test_get_all_components(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/b': 'no',\n })\n c = self.u.get_all_components()\n self.assertEqual(c, set(('a', 'b')))", "def supported_constructs(self) -> Set[Construct]:\n config: Dict[str, bool] = self.options.get(\"constructs\", {})\n result = set()\n for construct, supported in config.items():\n if supported:\n result.add(Construct[construct.upper()])\n return result", "def vendor_list():\n return ['nxos', 'eos', 'cumulus']", "def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)", "def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]", "def list_uses(self):\n return list(set(self._prop_typology['USE'].values))", "def get_supported_browsers_suggestions():\n supported_browsers = [\n 'chrome',\n 'chrome-remote',\n 'chrome-headless',\n 'chrome-remote-headless',\n 'firefox',\n 'firefox-remote',\n 'ie',\n 'ie-remote'\n ]\n return supported_browsers", "def _available_algorithms(**_: str) -> Set[str]:\n avail = set()\n pass2 = set()\n for algo in hashlib.algorithms_available:\n lalgo = algo.lower()\n if \"with\" in lalgo:\n continue # skip apparently redundant ones\n if lalgo != algo:\n pass2.add(algo)\n else:\n avail.add(lalgo)\n for algo in pass2:\n if algo.lower() not in avail:\n avail.add(algo)\n return avail", "def required_components(cls) -> List[Type[Component]]:\n\n return []", "def required_components(cls) -> List[Type[Component]]:\n\n return []", "def detect_supported_caps():\n result = []\n # generate list of supported capabilities\n\n # Intel RDT L3 CAT\n if common.PQOS_API.is_l3_cat_supported():\n result.append(common.CAT_L3_CAP)\n\n # Intel RDT L2 CAT\n if common.PQOS_API.is_l2_cat_supported():\n result.append(common.CAT_L2_CAP)\n\n # Intel RDT MBA\n if common.PQOS_API.is_mba_supported():\n result.append(common.MBA_CAP)\n\n if sstbf.is_sstbf_enabled():\n result.append(common.SSTBF_CAP)\n\n if power.is_sstcp_enabled():\n result.append(common.POWER_CAP)\n\n return result", "def _allowed_components():\n pass", "def chipset_driver_modules(self):\n\t\treturn self.__info_dict['info']['chipset_driver_modules']['value']", "def get_available_entities_models():\n return ['concat', 'bahdanau', 'luong']", "def get_supported_feature_sets(flags) -> List[str]:\n\n # find all supported feature sets\n supported = []\n for one_feature_set in sorted(REQUIRED_FEATURES.keys()):\n if supports_feature_set(flags, one_feature_set):\n supported.append(one_feature_set)\n return supported", "def get_platforms(self):\n if self.platform == 'All':\n return PLATFORMS\n else:\n return self.platform.split(':')", "def getChemCompSysNames(self):\n dataDict = self.__dict__\n result = frozenset(y for x in self.chemComp.namingSystems for y in x.chemCompSysNames if not y.specificChemCompVars).union(self.specificSysNames)\n return result", "def test__get_component_version_short(self):\n self._ucr({'repository/online/component/a/version': '%d.%d' % (MAJOR, MINOR)})\n ver = 
self.u._get_component_versions('a', None, None)\n self.assertEqual(set((U.UCS_Version((MAJOR, MINOR, 0)),)), ver)", "def get_graded_components(self):\r\n return self.components.keys()", "def manufacturers(self):\n return self._manufacturers", "def class_exts(cls):\n return set()", "def get_platform_combinations():\n mapped_osname = platform_map(g_osname)\n mapped_osarch = g_osarch\n ret = [mapped_osname]\n while True:\n ret += [mapped_osarch, mapped_osname + \"-\" + mapped_osarch]\n mapped_osarch = platform_map_iterate(mapped_osarch)\n if not mapped_osarch:\n break\n return sorted(ret, reverse=True) + [\"default\"]", "def get_supported_games(self):\n sg = []\n for game in c.supported_games.keys():\n sg.append(c.supported_games[game].game_name)\n return sg", "def used_features(self) -> List[str]:\n mapped = map_pipeline_names(self.input_features, self.output_features)\n return list(set(mapped))", "def get_component_name_list(self):\n return self._component_name_list", "def preset_modes(self) -> List[str]:\n return self._support_presets", "def list_components(self) -> Dict[str, Any]:\n return self._manager.list_components()" ]
[ "0.7520092", "0.6942378", "0.6502353", "0.64853776", "0.64341474", "0.6051912", "0.603774", "0.58496153", "0.58443105", "0.58355975", "0.5832998", "0.5783114", "0.5783114", "0.577528", "0.57419336", "0.57056123", "0.56868654", "0.5655444", "0.5626463", "0.5625516", "0.56065476", "0.558589", "0.5582539", "0.5577305", "0.5568958", "0.556739", "0.5559511", "0.55418396", "0.55068254", "0.5482981" ]
0.7170247
1
Load the specified mojofile, and return its model id.
def load_model(self, mojofile: str) -> str:
    return self._request("GET /loadmojo", params={"file": mojofile})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self, filename):\r\n pass", "def load_model(self, file=None):\n return None", "def load_model(self, file_name=None):\n try:\n if file_name:\n self.agent.load_model(file_name)\n else:\n self.agent.load_model()\n print('Model loaded successfully')\n return 1\n except:\n print('Failed to load model')\n return 0", "def load(model_file):\n return pickle.load(open(model_file))", "def load_model(filename):\n return Model.load_savefile(filename)", "def load_model(self, model_path: str):", "def load(path_to_model):\n pass", "def load_model(self, path):\n pass", "def load(self, file_id):\n pass", "def load_model(file_name):\n with open(file_name, 'rb') as file:\n return pickle.load(file)", "def load_model(self) -> Any:", "def load_model():\n\n # find location of model\n\n file_path = '/Users/davidodwyer/Desktop' # to the directory\n file_name = 'original_mlr.joblib' \n the_file = os.path.join(file_path, file_name)\n\n # load model\n\n model = load(the_file)\n\n return model", "def load_model():\n logger.info('load_model called')\n return 1", "def load_model(fname: os.PathLike) -> Model:\n return Model.load(fname)", "def load_model(self, **params):\n \t# file_name = params['name']\n # return pickle.load(gzip.open(file_name, 'rb'))", "def load_model_custom(file, object):\n return getattr(load_module(file), object)", "def load(self, path, model_id):\n self.load_state_dict(torch.load(os.path.join(path, '{}-retriever'.format(model_id))))", "def load_model(task_id):\n # get model file name\n task_chain_id = task_id.split('-')[0]\n\n root_dir = os.path.split(os.path.realpath(__file__))[0]\n model_path = os.path.join(root_dir, '..', 'common', 'model', task_chain_id)\n model_file_name = os.path.join(model_path, task_id + '.model')\n if not os.path.exists(model_file_name):\n raise Exception(\"Algorithm load_model not find model {}\".format(model_file_name))\n # load mode from disk\n model = load(model_file_name)\n\n return model", "def load(identifier, path):\r\n\tloader = importlib.machinery.SourceFileLoader(identifier, path)\r\n\thandle = loader.load_module(identifier)\r\n\treturn handle", "def load_model(language_id, model_type):\n\n # getting the language code from it's id\n language_code = get_language_code(language_id)\n\n # getting the model name from it's type\n model_name = get_model_name(model_type)\n\n # building the model's full path\n model_full_path = \"%s/%s/%s.txt\" % (models_base_path, language_code, model_name)\n\n # returning the model loaded directly from file\n return load_model_from_file(model_full_path)", "def import_model(path=None):\n path = get_model_path() if path is None else path\n return torch.jit.load(path)", "def load_model_by_name(model, global_step, device=None, path=\"/scratch/users/zucks626/ADNI/IPMI/checkpoints/\"):\r\n # path = \"/scratch/users/zucks626/ADNI/ae_cls/checkpoints/\"\r\n file_path = path + model.name + \"/\" + 'model-{:05d}.pt'.format(global_step)\r\n state = torch.load(file_path, map_location=device)\r\n model.load_state_dict(state)\r\n print(\"Loaded from {}\".format(file_path))", "def loadModel(file_name):\n with open(SAVE_PATH + file_name, \"rb\") as in_file:\n model = pickle.load(in_file, encoding = \"uft-8\")\n print(\"{} loaded\".format(file_name))\n return model", "def import_model(file):\n file = os.path.expanduser(file)\n obj = IsolationForest()\n metadata = obj._cpp_obj.deserialize_obj(file)\n metadata = json.loads(metadata)\n obj._take_metadata(metadata)\n return obj", "def read_model(filename):\n return joblib.load(filename)", 
"def load_model(self, filename):\n filename = path.join(self.root_path, f'models/{filename}.pkl')\n self.model = pickle.load(open(filename, \"rb\"))\n print('Successfully loaded model from '+filename)", "def load_model():\n return \"None\"", "def load_model(filename):\n checkpoint = torch.load(filename)\n model = QNetwork(checkpoint['input_size'], checkpoint['output_size'], checkpoint['hidden_layers'])\n model.load_state_dict(checkpoint['state_dict'])\n return model", "def get_model_id(model_name, workspace, header, user):\n uri = \"https://api.anaplan.com/1/3/workspaces/{}/models/\".format(workspace)\n response = requests.get(uri, headers = header)\n response_json = json.loads(models.text.encode(\"utf-8\"))\n for model in response_json:\n if model[u\"name\"] == unicode(model_name):\n return model[u\"id\"]", "def load(\n self,\n modelLoadPath\n ):\n pass" ]
[ "0.6718768", "0.654268", "0.61337996", "0.6125505", "0.6113581", "0.6078083", "0.60401374", "0.60291064", "0.5945967", "0.5840561", "0.5827812", "0.5739245", "0.5689958", "0.5678766", "0.5677562", "0.5670736", "0.56696165", "0.5610047", "0.5605511", "0.55876744", "0.5587407", "0.55653065", "0.55591935", "0.5557846", "0.5543136", "0.55376154", "0.55330473", "0.5532693", "0.55221325", "0.5519036" ]
0.78735054
0
Shutdown / kill the server. Sometimes the ``POST /shutdown`` request may fail. In any case we attempt to terminate the process with the SIGKILL signal if it still seems to be running.
def shutdown(self): try: self._request("POST /shutdown") time.sleep(0.300) except requests.exceptions.ConnectionError: pass if self._process and self._process.poll() is None: self._process.kill() if self._session: self._session.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)", "def shutdown():\n self_pid = os.getpid()\n logging.info('Forcibly terminating program (PID=%s)', self_pid)\n os.kill(self_pid, signal.SIGKILL)", "def stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGTERM)", "def _shutdown(self, *args):\n self.server.shutdown()", "def shutdown():\n shutdown_func = request.environ.get(\n 'werkzeug.server.shutdown') # default web server with flask\n if shutdown_func is None:\n return 'unable to shutdown server!', 501\n shutdown_func()\n return \"server shutting down...\"", "def force_stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGINT)", "def server_shutdown():\n if not current_app.testing:\n abort(404)\n shutdown = request.environ.get('werkzeug.server.shutdown')\n if not shutdown:\n abort(500)\n shutdown()\n return 'Shutting down...'", "def shutdown_server():\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()", "def shutdown(self):\n self._send_command('shutdown')\n self.sock.close()\n self.disconnected = True", "def shutdown(self):\n # TODO: Build a certificate chain so we can verify our localhost and remove the verify=False workaround.\n requests.get('{local_server_address}/shutdown'.format(local_server_address=self.local_server_address),\n verify=False)", "def shutdown(self) -> None:\n prefix = f\"In {ThreadedServer.__name__}.{ThreadedServer.shutdown.__name__}\"\n\n print(f\"{prefix}: Instructing the server to shut down...\", file=self.stdout)\n with self._server_exception_lock:\n if self._server_exception is not None:\n raise self._server_exception\n\n print(f\"{prefix}: Waiting for server to shut down...\", file=self.stdout)\n self._httpd.shutdown()", "def shutdown(self):\n self.logger.info(\"Received graceful shutdown request\")\n self.stop()", "def _HandleShutdown(self):\n self.send_response(httplib.OK)\n self.send_header('Content-Type', 'text/plain')\n self.end_headers()\n self.wfile.write('API Server Quitting')\n self.server.shutdown()", "def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()", "def shutdown(self):\n self.broadcast(self.server_socket, '[server shutdown]', 'server')\n self.selector.unregister(self.server_socket)\n self.server_socket.close()", "def shutdown(self):\n self.req_shutdown = True", "def shutdown():\n\n cmd = dict()\n cmd[\"type_\"] = \"shutdown\"\n cmd[\"name_\"] = \"all\"\n\n ## In case of the shutdown there will be no returned message to\n ## check the success.\n s = comm.send_and_receive_socket(cmd)\n\n s.close()", "def shutdown(self, signum, frame):\n self.serverSocket.close()\n sys.exit(0)", "def shutdown():\n shutdown_server()\n return \"Shutting down server\"", "def stop(self):\n self.logger.info('Shutting down SimpleHTTPServer')\n stop_cmd = \"pkill -9 -f '{0}'\".format(self.server_cmd)\n self._execute_command(stop_cmd)", "def shutdown_server():\n func = flask.request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()", "async def shutdown(self):\n\n if self.log_output:\n logging.info('Shutting down ...')\n else:\n print('Shutting down ...')\n\n await self.send_reset()\n\n try:\n self.loop.stop()\n except:\n pass\n try:\n self.loop.close()\n except:\n pass\n sys.exit(0)", "def shutdown(self):\n self._shutdown_requested_event.set()\n 
SimpleJSONRPCServer.SimpleJSONRPCServer.shutdown(self)\n logging.info('Server shutdown complete')", "def shutdown():\n func = flask.request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n return 'Server shutting down...'", "def rpc_shutdown(self):\n\t\tshutdown_thread = threading.Thread(target=self.server.shutdown)\n\t\tshutdown_thread.start()\n\t\treturn", "def shutdown(self):\n self.exit_app()", "async def kill_server(self):\n if await self._kill():\n await self.send('Server killed')", "def shutdown(self):\n # First call superclass shutdown()\n HTTPServer.shutdown(self)\n\n # We also need to manually close the socket\n self.socket.close()", "def stop() -> None:\n global _server\n if _server:\n try:\n _server.shutdown()\n except Exception:\n pass", "def shutdown(self):\n # shutdown all known sessions\n for session in self.sessions.values():\n session.shutdown()\n\n # if we are a daemon remove pid file\n if self.config[\"daemonize\"]:\n pid_file = self.config[\"pidfile\"]\n try:\n os.unlink(pid_file)\n except OSError:\n logger.exception(\"error daemon pid file: %s\", pid_file)\n\n # remove server from server list\n CoreServer.remove_server(self)" ]
[ "0.8043204", "0.7408491", "0.74051774", "0.73047185", "0.7299", "0.7294056", "0.72432417", "0.7183416", "0.7163862", "0.7163367", "0.7136405", "0.71052444", "0.70983547", "0.70959175", "0.70620507", "0.70323277", "0.70266837", "0.7002895", "0.6961565", "0.696054", "0.69446033", "0.69380605", "0.6931379", "0.68896073", "0.68697613", "0.686606", "0.6856233", "0.6852677", "0.6819714", "0.68126875" ]
0.81470776
0
Update the kernelspecs table.
def refresh_kernelspecs() -> None: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_kernels() -> None:\n ...", "def modify_devices(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n other_devices = devices[\"other_devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n if other_devices:\n self._modify_other_devices(\n node, other_devices, kernel_devices, dpdk_devices\n )\n\n # Get the devices again for this node\n self._get_device(node)\n devices = node[\"devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n klen = len(kernel_devices)\n if klen > 0:\n print(\"\\nThese devices are safe to be used with VPP.\\n\")\n VppPCIUtil.show_vpp_devices(kernel_devices)\n question = (\n \"\\nWould you like to use any of these \" \"device(s) for VPP [y/N]? \"\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd = {}\n for dit in kernel_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} \".format(dvid)\n question += \"for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n question = \"Would you like to bind the driver {} for {} [y/N]? \".format(\n driver, dvid\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n logging.debug(\n \"Binding device {} to driver {}\".format(\n dvid, driver\n )\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\n \"Could not bind device {}\".format(dvid)\n )\n dpdk_devices[dvid] = device\n del kernel_devices[dvid]\n\n dlen = len(dpdk_devices)\n if dlen > 0:\n print(\"\\nThese device(s) are already using DPDK.\\n\")\n VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)\n question = \"\\nWould you like to remove any of \"\n question += \"these device(s) [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to remove {} [y/N]? 
\".format(dvid)\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl[dvid] = device\n for dit in vppdl.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n logging.debug(\n \"Binding device {} to driver {}\".format(dvid, driver)\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n kernel_devices[dvid] = device\n del dpdk_devices[dvid]\n\n interfaces = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n VppPCIUtil.vpp_create_interface(interfaces, dvid, device)\n node[\"interfaces\"] = interfaces\n\n self._update_auto_config()\n self.updateconfig()", "def update_system_versions(self):\n #system_versions = [SystemVersion(id=-1 ,type=u'QX100',desc=u'Unknown Hardware version'),\n # SystemVersion(id=0 ,type=u'QX100',desc=u'QX100 - HW Rev A/B'),\n system_versions = [SystemVersion(id=1 ,type=u'QX100', desc=u'QX100 - HW Rev A/B bigger detector cap differences'),\n SystemVersion(id=2 ,type=u'QX100', desc=u'QX100 - HW Rev C'),\n SystemVersion(id=3 ,type=u'QX150', desc=u'QX150 - HW Rev Z Upgrade'),\n SystemVersion(id=4 ,type=u'QX200', desc=u'QX200 - HW Rev Z'),\n SystemVersion(id=5 ,type=u'QX201', desc=u'QX200 - HW with BR built Detector'),\n\t\t\t SystemVersion(id=6 ,type=u'QX150L', desc=u'QX150 - HW Rev Z Upgrade with LED'),\n SystemVersion(id=7 ,type=u'QX201L', desc=u'QX201 - HW with BR built LED Detector'),\n SystemVersion(id=200,type=u'QX200', desc=u'QX200 - Pre-Beta HW')]\n for sv in system_versions:\n dbsv = Session.query(SystemVersion).filter_by(id=sv.id).first()\n if not dbsv:\n Session.add(sv)\n else:\n if (dbsv.type != sv.type):\n dbsv.type = sv.type\n if( dbsv.desc != sv.desc):\n dbsv.desc = sv.desc\n\n Session.commit()", "def test_update_software_components_for_system_module(self):\n pass", "def update_device(self, dev_dict):\n # Note(jprabh1x): added bus,slot,function into fields dict as \n # seperate fields.\n no_changes = ('status', 'instance_uuid', 'id', 'extra_info', 'workload')\n map(lambda x: dev_dict.pop(x, None),\n [key for key in no_changes])\n\n # Note(jprabh1x): populating values for bus,slot,function from address in dev_dict.\n if dev_dict.has_key(\"address\"):\n \t\taddress = pci_utils.parse_address(dev_dict[\"address\"])\n \t\tdev_dict.update({'bus':str(address[1]), 'slot':str(address[2]), 'function':str(address[3])})\n for k, v in dev_dict.items():\n if k in self.fields.keys():\n self[k] = v\n else:\n extra_info = self.extra_info\n extra_info.update({k: str(v)})\n self.extra_info = extra_info", "def test_list_drives_drive_firmware_update(self):\n pass", "def command_update_hw(self, cmd):\n # TODO\n pass", "def test_update_pci_device(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def update_many(self, isystem_uuid, patch):\n\n if self._from_isystems and not isystem_uuid:\n raise exception.InvalidParameterValue(_(\n \"System id not specified.\"))\n\n # Validate if there are pending updates on the controllers lvg\n controller_hosts = pecan.request.dbapi.ihost_get_by_personality(\n constants.CONTROLLER\n )\n\n controllers_lvg_updated = True\n for host in controller_hosts:\n host_fs_list = pecan.request.dbapi.host_fs_get_by_ihost(host.uuid)\n host_lvg_list = pecan.request.dbapi.ilvg_get_by_ihost(host.uuid)\n controllers_lvg_updated = controllers_lvg_updated and \\\n 
utils.is_host_lvg_updated(host_fs_list, host_lvg_list)\n\n # Validate input filesystem names\n controller_fs_list = pecan.request.dbapi.controller_fs_get_list()\n valid_fs_list = []\n if controller_fs_list:\n valid_fs_list = {fs.name: fs.size for fs in controller_fs_list}\n\n reinstall_required = False\n reboot_required = False\n modified_fs = []\n update_fs_list = []\n for p_list in patch:\n p_obj_list = jsonpatch.JsonPatch(p_list)\n for p_obj in p_obj_list:\n if p_obj['path'] == '/name':\n fs_name = p_obj['value']\n if fs_name in update_fs_list:\n msg = _(\"Duplicate fs_name \"\n \"'%s' in parameter list\" % fs_name)\n raise wsme.exc.ClientSideError(msg)\n else:\n update_fs_list.append(fs_name)\n elif p_obj['path'] == '/size':\n size = p_obj['value']\n\n if fs_name not in valid_fs_list.keys():\n msg = _(\"ControllerFs update failed: invalid filesystem \"\n \"'%s' \" % fs_name)\n raise wsme.exc.ClientSideError(msg)\n elif not cutils.is_int_like(size):\n msg = _(\"ControllerFs update failed: filesystem '%s' \"\n \"size must be an integer \" % fs_name)\n raise wsme.exc.ClientSideError(msg)\n elif int(size) <= int(valid_fs_list[fs_name]):\n msg = _(\"ControllerFs update failed: size for filesystem '%s' \"\n \"should be bigger than %s \" % (fs_name, valid_fs_list[fs_name]))\n raise wsme.exc.ClientSideError(msg)\n elif not controllers_lvg_updated:\n msg = _(\"ControllerFs update failed: controllers have pending LVG \"\n \"updates, please retry again later.\")\n raise wsme.exc.ClientSideError(msg)\n\n if fs_name in constants.SUPPORTED_REPLICATED_FILEYSTEM_LIST:\n if utils.is_drbd_fs_resizing():\n raise wsme.exc.ClientSideError(\n _(\"A drbd sync operation is currently in progress. \"\n \"Retry again later.\")\n )\n\n modified_fs += [fs_name]\n\n controller_fs_list_new = []\n for fs in controller_fs_list:\n replaced = False\n for p_list in patch:\n p_obj_list = jsonpatch.JsonPatch(p_list)\n for p_obj in p_obj_list:\n if p_obj['value'] == fs['name']:\n try:\n controller_fs_list_new += [ControllerFs(\n **jsonpatch.apply_patch(fs.as_dict(), p_obj_list))]\n replaced = True\n break\n except utils.JSONPATCH_EXCEPTIONS as e:\n raise exception.PatchError(patch=p_list, reason=e)\n if replaced:\n break\n if not replaced:\n controller_fs_list_new += [fs]\n\n cgtsvg_growth_gib = _check_controller_multi_fs_data(\n pecan.request.context,\n controller_fs_list_new)\n\n if _check_controller_state():\n _check_controller_multi_fs(controller_fs_list_new,\n cgtsvg_growth_gib=cgtsvg_growth_gib)\n for fs in controller_fs_list_new:\n if fs.name in modified_fs:\n value = {'size': fs.size}\n if fs.replicated:\n value.update({'state': constants.CONTROLLER_FS_RESIZING_IN_PROGRESS})\n pecan.request.dbapi.controller_fs_update(fs.uuid, value)\n\n try:\n # perform rpc to conductor to perform config apply\n pecan.request.rpcapi.update_storage_config(\n pecan.request.context,\n update_storage=False,\n reinstall_required=reinstall_required,\n reboot_required=reboot_required,\n filesystem_list=modified_fs\n )\n\n except Exception as e:\n msg = _(\"Failed to update filesystem size \")\n LOG.error(\"%s with patch %s with exception %s\" % (msg, patch, e))\n raise wsme.exc.ClientSideError(msg)", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def update(self):\n for component in self.components.values():\n try:\n component.update()\n except Exception as e:\n if self.ds.isFMSAttached():\n log.error(\"In subsystem %s: %s\" % (component, e))\n else:\n raise e", "def _update_device_types(self):\n 
device_types = self.adapter.device_types()\n for device_type in device_types.items:\n key = device_type.id\n self._make_up_to_date('/device_types', key, device_type)", "def test_update_software_configuration_for_system_module(self):\n pass", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def update_firmware(self) -> str:", "def test_patch_pci_device(self):\n pass", "def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)", "def test_update_hyperflex_capability_info(self):\n pass", "def visit_table(self, sytable):\n self.current.update(sytable)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def update(self):\n self.device = self._api.device_query(self._hardware_address, {})", "def test_update_bios_unit(self):\n pass", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def test_update_bios_boot_mode(self):\n pass", "async def _async_udev_events(self, kernel: pyudev.Device):\n # Update device List\n if not kernel.device_node or self.sys_hardware.helper.hide_virtual_device(\n kernel\n ):\n return\n\n hw_action: HardwareAction | None = None\n device: Device | None = None\n\n ##\n # Remove\n if kernel.action == UdevKernelAction.REMOVE:\n try:\n device = self.sys_hardware.get_by_path(Path(kernel.sys_path))\n except HardwareNotFound:\n return\n else:\n self.sys_hardware.delete_device(device)\n hw_action = HardwareAction.REMOVE\n\n ##\n # Add\n if kernel.action in (UdevKernelAction.ADD, UdevKernelAction.CHANGE):\n # We get pure Kernel events only inside container.\n # But udev itself need also time to initialize the device\n # before we can use it correctly\n udev = None\n for _ in range(3):\n await asyncio.sleep(2)\n try:\n udev = pyudev.Devices.from_sys_path(self.context, kernel.sys_path)\n except pyudev.DeviceNotFoundAtPathError:\n continue\n if udev.is_initialized:\n break\n\n # Is not ready\n if not udev:\n _LOGGER.warning(\n \"Ignore device %s / failes to initialize by udev\", kernel.sys_path\n )\n return\n\n device = Device.import_udev(udev)\n self.sys_hardware.update_device(device)\n\n # If it's a new device - process actions\n if kernel.action == UdevKernelAction.ADD:\n hw_action = HardwareAction.ADD\n\n # Ignore event for future processing\n if device is None or hw_action is None:\n return\n _LOGGER.info(\n \"Detecting %s hardware %s - %s\", hw_action, device.path, device.by_id\n )\n\n # Fire Hardware event to bus\n if hw_action == HardwareAction.ADD:\n self.sys_bus.fire_event(BusEvent.HARDWARE_NEW_DEVICE, device)\n elif hw_action == HardwareAction.REMOVE:\n self.sys_bus.fire_event(BusEvent.HARDWARE_REMOVE_DEVICE, device)", "def test_update_device_template(self):\n pass", "async def container_specs(self, event):\n await self.send(text_data=event['specs'])" ]
[ "0.5808753", "0.5629541", "0.5591042", "0.5496405", "0.54948765", "0.5418482", "0.52984756", "0.5270303", "0.5197712", "0.5154047", "0.5119544", "0.5119544", "0.51178193", "0.5101636", "0.50797075", "0.5073883", "0.5045194", "0.50378954", "0.50309587", "0.5014471", "0.5006939", "0.49790138", "0.49790138", "0.49558872", "0.4944775", "0.49345356", "0.4933418", "0.49157098", "0.49117544", "0.48883072" ]
0.7550118
0
Creates a new terminal and returns the name.
def create_terminal() -> str: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n return \"Terminal('{}')\".format(self.name)", "def addTerminal(self, name, **opts):\n opts.update(renamable=True, removable=True)\n name = self.nextTerminalName(name)\n term = NetTerminal(self, name, **opts)\n self.terminals[name] = term\n if term.isInput():\n self._inputs[name] = term\n elif term.isOutput():\n self._outputs[name] = term\n self.graphicsItem().updateTerminals()\n self.sigTerminalAdded.emit(self, term)\n return term", "def console_create(self):\n return self.call('console.create')", "def _spawn_turtle(self, trt_x, trt_y, name=None):\n\n\t\tif name is None or name == \"\":\n\t\t\tname = self._create_unique_turtle_name()\n\t\telif self._has_turtle(name):\n\t\t\treturn \"\"\n\n\t\tturtle = Turtle(name, Point(trt_x, trt_y))\n\t\tself._turtles[name] = turtle\n\n\t\trospy.loginfo(\"New turtle [%s] at x=[%d], y=[%d]\", name, trt_x, trt_y)\n\n\t\treturn name", "def get_custom_terminal_cmd():\n return lnp.userconfig.get_string('terminal')", "def create_namespace(self):\n print(\"\\nCreating namespace...\")\n\n name = input(\" - name (default = commands): \") or \"commands\"\n path = \"./{}\".format(name.replace(\".\", \"/\")).lower()\n\n os.makedirs(path, exist_ok=True)\n\n init_path = os.path.join(path, \"__init__.py\")\n if not os.path.isfile(init_path):\n open(init_path, 'w+').close()\n\n return name, path", "def _create_unique_turtle_name(self):\n\n\t\tself._id_counter += 1\n\t\tnew_name = \"turtle{}\".format(self._id_counter)\n\n\t\tif self._has_turtle(new_name):\n\t\t\treturn self._create_unique_turtle_name()\n\n\t\treturn new_name", "def new_session(self):\n self.command(\"new\")", "def CreateConsole(self):\n lc = launcher.TextFrame('title')\n return lc", "def create_name (self):\n return self.create_topic().create_name('Name')", "def create_device(name, device_type, runtime):\n command = 'create \"%s\" \"%s\" \"%s\"' % (\n name, device_type.identifier, runtime.identifier)\n device_id = _run_command(command)\n\n # The device ID has a new line at the end. 
Strip it when returning.\n return device_id[:-1]", "def generate_unique_name():\n return 'titanic-' + str(get_mac())", "def name_create(self, name):\n values = {\n 'name': name,\n }\n return self.create(values).name_get()[0]", "def create_name() -> str:\r\n user_input = str(input(\"What is your name?\\n\"))\r\n return user_input", "def terminal_name(arg_terminal):\n if not arg_terminal:\n return 'до двери'\n curs_dict = APP.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n terminal_sql = curs_dict.mogrify(TERMINAL_SQL_TEMPL, (arg_terminal,))\n logging.info('terminal_sql=%s', terminal_sql)\n curs_dict.execute(terminal_sql)\n res = curs_dict.fetchone()\n curs_dict.close()\n return '{}, {}'.format(res.get('name', 'Название терминала по id не найдено'),\n res.get('address', 'Адрес терминала по id не найден'))", "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name", "def get_configured_terminal():\n s = lnp.userconfig.get_string('terminal_type')\n terminals = get_valid_terminals()\n for t in terminals:\n if s == t.name:\n return t\n return CustomTerminal", "def create_session(\n path: str,\n type: str,\n name: Optional[str] = None,\n kernel_name: Optional[str] = None,\n kernel_id: Optional[str] = None,\n) -> str:\n ...", "def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name", "def terminal(self):\n return self._term", "async def osname(self):\n\n await self.bot.say(box(system(), 'Bash'))", "def terminal_init(self):\n pass", "def create_table(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"create_table\")", "def create_table(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"create_table\")", "def CreateCharacter(self):\n attrs = Dictionary[str, object]()\n for k in self._attrs.keys():\n attrs[k] = self._attrs[k]\n return ClientAPI.Network.CreateCharacter(attrs)", "def parse_terminal(node, ty=None):\n if ty == \"METHOD_CALL\":\n return Terminal(\"var\", None)\n elif ty:\n return Terminal(\"var\", ty)\n else:\n return Terminal(\"var\", get_type(node))", "def terminalRenamed(self, term, oldName):\n newName = term._name\n for d in [self.terminals, self._inputs, self._outputs]:\n if oldName not in d:\n continue\n d[newName] = d[oldName]\n del d[oldName]\n\n self.graphicsItem().updateTerminals()\n self.sigTerminalRenamed.emit(term, oldName)", "def create(self) -> dict:\n\n questions = [\n Text(name=\"name\", message=\"Enter category name\"),\n ]\n\n return prompt(questions)", "def create_kernel(name: str) -> str:\n ...", "def configure_custom_terminal(new_path):\n lnp.userconfig['terminal'] = new_path\n lnp.userconfig.save_data()" ]
[ "0.6440198", "0.6267893", "0.62144405", "0.60047346", "0.5891908", "0.5890155", "0.5865237", "0.57142264", "0.5690836", "0.5674329", "0.56701756", "0.56039375", "0.5576599", "0.5536455", "0.5528613", "0.5440345", "0.5381108", "0.5373771", "0.52840245", "0.52517766", "0.52179325", "0.51868945", "0.51438683", "0.51438683", "0.5118812", "0.51126826", "0.50778836", "0.50727916", "0.5062687", "0.50386935" ]
0.80201596
0
Update the kernels table.
def refresh_kernels() -> None: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_kernelspecs() -> None:\n ...", "def callUpdateTable(self):\r\n self.updateTable()", "def gpu_kernels(self, node, name):\r\n raise MethodNotDefined, 'gpu_kernels'", "def run(self):\n\n self.sess.run(self.update_operations)", "def update(self, x_train_single, updated_h):\n # x_row = cp.array(x_train_single.toarray())\n # cp.cuda.Stream.null.synchronize()\n updater(x_train_single,updated_h,self.weights,self.num_features,self.num_models,self.learning_rate)\n # self.biases += updated_h * self.learning_rate", "def _modify_updates(self, updates):\n\n if self.max_kernel_norm is not None:\n W, = self.transformer.get_params()\n if W in updates:\n updated_W = updates[W]\n row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))\n desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)\n scales = desired_norms / (1e-7 + row_norms)\n updates[W] = (updated_W * scales.dimshuffle('x', 'x', 'x', 0))", "def __update_table(self):\n\n headlines = [\"\", ]\n headlines += range(1, + 1)\n headlines = [\" \"] + [str(x) for x in range(1, self.find_table_length() + 1)]\n self.__main_display_table.config(columns=headlines)\n\n for headline in headlines:\n self.__main_display_table.heading(headline, text=headline)\n self.__main_display_table.column(headline, anchor=\"center\", width=35)\n\n data = self.__display_buses_location()\n\n for i in self.__main_display_table.get_children():\n # deletes all the data in the chart\n self.__main_display_table.delete(i)\n for line in data:\n # inserts new data into the chart, goes line by line\n self.__main_display_table.insert(\"\", END, values=line)", "def update(self):\n self.cursor.execute(\"\"\"SELECT * FROM sensors_powersensor\"\"\")\n list = self.cursor.fetchall()\n for sensor in list:\n self.add(sensor[2], sensor[1])", "def add(self, kernels):\n if not isinstance(kernels, list):\n kernels = [kernels]\n self.kernels += kernels\n # update `_active_indices` from scratch: inactive kernels might be added\n self._active_indices = [idx for idx in range(len(self)) if \\\n not self.kernels[idx].stop()]\n self._ratio_nondom_offspring_incumbent = len(self) * [0] # len(self) changed", "def updateGrid(self) -> None:\n emu = self.emulator\n arch = self.root.arch\n registers = arch.registers\n self.__values.setRowCount(len(registers))\n for i, reg in enumerate(registers):\n self.__values.setRowHeight(i, self.__row_size)\n name = QTableWidgetItem(reg)\n name.setFlags(Qt.NoItemFlags)\n val = emu.get_register_value(reg) if emu.vm else 0\n old_val = self.__old_register_values.get(reg, 0)\n if type(val) in (int, int):\n value = format_address(val, arch)\n else:\n value = str(val)\n value = QTableWidgetItem( value )\n if old_val != val:\n self.__old_register_values[reg] = val\n value.setForeground(QColor(Qt.red))\n value.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable)\n self.__values.setItem(i, 0, name)\n self.__values.setItem(i, 1, value)\n return", "def update_vluln_table():", "def release(self):\n # type: () -> None\n for k in self.kernels:\n k.release()", "def update(self, a, b, c, d):\n self.table.ravel()[:] = [a, b, c, d]\n self.N = self.table.sum()", "def update_data(self):\n for sai_id_key in self.if_id_map:\n namespace, sai_id = mibs.split_sai_id_key(sai_id_key)\n if_idx = mibs.get_index_from_str(self.if_id_map[sai_id_key])\n counter_table = self.namespace_db_map[namespace].get_all(mibs.COUNTERS_DB, \\\n mibs.counter_table(sai_id))\n if counter_table is None:\n counter_table = {}\n self.if_counters[if_idx] = counter_table\n\n\n 
self.lag_name_if_name_map, \\\n self.if_name_lag_name_map, \\\n self.oid_lag_name_map, _, _ = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_lag_tables, self.db_conn)\n\n self.if_range = sorted(list(self.oid_name_map.keys()) + list(self.oid_lag_name_map.keys()))\n self.if_range = [(i,) for i in self.if_range]", "def visit_table(self, sytable):\n self.current.update(sytable)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def update_knobs(self):\n self.previous_knobs = self.current_knobs\n self.current_knobs = {'Modulation' : 'fsk',\n 'Rs' : 0,\n 'EIRP' : 0,\n 'Speed' : 0}", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def __initializeKernels(self):\n # FFT plans:\n self.__initializeDopplerIfftPlan() # for Doppler Ifft\n self.__initializeDemodIfftPlan() # for demod \n self.__initializeSNRFftPlan() # for findSNR\n \n # GPU kernels\n kernel = self.CudaKernels\n ## kernels for initialization\n self.GPU_multInputVectorWithMasks = kernel.get_function('multInputVectorWithMasks').prepare('PPP')\n \n self.GPU_complexConj = kernel.get_function('complexConj').prepare('P')\n self.GPU_scaleComplexByScalar = kernel.get_function('scaleComplexByScalar').prepare('Pf')\n self.GPU_setComplexArrayToZeros = kernel.get_function('setComplexArrayToZeros').prepare('P')\n \n ## kernels for doppler search\n self.GPU_filterMasks = kernel.get_function('multInputVectorWithShiftedMasksDopp').prepare('PPPPii')\n # for multInputVectorWithShiftedMasks\n self.numBlocks = self.Nfft/self.numThreads\n self.bShapeVecMasks = (int(self.numThreads),1,1)\n self.gShapeVecMasks = (int(self.numBlocks),1)\n assert self.bShapeVecMasks[0]*self.gShapeVecMasks[0]==self.Nfft,'Dimension mismatch'\n\n self.GPU_absSumDoppler = kernel.get_function('blockAbsSumAtomic').prepare('PPi')\n # for the absSumKernel to sum the rows together\n self.bShapeAbsSum = (128,1,1) # 128 and 2 in next line is just picked TODO: should be config val\n self.gShapeAbsSum = (2,int(self.doppIdxArrayLen)) # tweak these\n\n assert self.Nfft % self.bShapeAbsSum[0]*self.gShapeAbsSum[0] == 0,'Nfft has to be dividable by block and grid dimensions'\n\n self.GPU_estDoppler = kernel.get_function('findDopplerEst').prepare('PPPii')\n # for the small kernel that finds the doppler\n self.bShapeDopp = (self.num_masks,1,1)\n self.gShapeDopp = (1,1)\n\n self.GPU_setArrayToZeros = kernel.get_function('setArrayToZeros').prepare('P')\n # for the set to zero kernel for the sum\n self.bShapeZero = (int(self.num_masks),1,1)\n self.gShapeZero = (int(self.doppIdxArrayLen),1)\n\n ## for demodulation\n self.bShapeVecMasks2 = (int(256),1,1) ## 256 is just picked, TODO: should be config val\n self.gShapeVecMasks2 = (int(self.Nfft/self.bShapeVecMasks2[0]),1)\n 
self.complexShiftMulMasks = kernel.get_function('multInputVectorWithShiftedMask').prepare('PPPi')\n self.complexHeterodyne = kernel.get_function('complexHeterodyne').prepare('PPfffi')\n self.findcentres = kernel.get_function('findCentres').prepare('PPPPffii')\n self.bShapeCentres = (256,1,1) ## 256 is just picked, TODO: should be config val", "def command_update_hw(self, cmd):\n # TODO\n pass", "def update():", "def update():", "def update(self):\n # GPS data\n self.model.GPS_latitude.set(self._kernel.data.lat)\n self.model.GPS_longitude.set(self._kernel.data.lon)\n \n self.model.GPS_heading.set(self._kernel.data.gps_heading)\n self.model.GPS_speed.set(self._kernel.data.speed)\n self.model.GPS_altitude.set(self._kernel.data.altitude)\n \n self.model.GPS_fix.set(self._kernel.data.fix)\n self.model.GPS_satellite_count.set(self._kernel.data.num_sat)\n \n # compass data\n self.model.compass_heading.set(self._kernel.data.compass_heading)\n \n # time data\n self.model.time.set(self._kernel.data.timestamp.isoformat())\n self.model.date.set(self._kernel.data.datestamp.isoformat())\n \n # other data\n self.model.temperature.set(self._kernel.data.temperature)", "def _modify_updates(self, updates):\n wxf = self.wxf\n wyf = self.wyf\n wxf_updated = updates[wxf]\n wyf_updated = updates[wyf]\n nwxf = (wxf_updated.std(0) + SMALL)[numpy.newaxis, :]\n nwyf = (wyf_updated.std(0) + SMALL)[numpy.newaxis, :]\n meannxf = nwxf.mean()\n meannyf = nwyf.mean()\n # Center filters\n centered_wxf = wxf_updated - wxf_updated.mean(0)\n centered_wyf = wyf_updated - wyf_updated.mean(0)\n # Fix standard deviation\n wxf_updated = centered_wxf * (meannxf / nwxf)\n wyf_updated = centered_wyf * (meannyf / nwyf)\n updates[wxf] = wxf_updated\n updates[wyf] = wyf_updated", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def update(self):\n self.device.update()", "def update(self):\n self.device.update()", "def updateH(self,k_vec,it):\n self.k_vec = k_vec\n self.it = it\n self.H_kc = fl.H_k(k_vec, self.it, self.delta)", "def refresh_table(self):\n self._table['bounty_column'] = Driver.instance.find_elements(*self._selectors['bounty_column'])\n self._table['first_name_column'] = Driver.instance.find_elements(*self._selectors['first_name_column'])\n self._table['last_name_column'] = Driver.instance.find_elements(*self._selectors['last_name_column'])\n self._table['edit_column'] = Driver.instance.find_elements(*self._selectors['edit_column'])\n self._table['details_column'] = Driver.instance.find_elements(*self._selectors['details_column'])\n self._table['delete_column'] = Driver.instance.find_elements(*self._selectors['delete_column'])" ]
[ "0.6155734", "0.57529676", "0.5730305", "0.56955045", "0.56193566", "0.5611958", "0.5498789", "0.5324433", "0.5293424", "0.5292726", "0.5291626", "0.52879345", "0.5268349", "0.5242812", "0.52361995", "0.52361995", "0.51978123", "0.5197042", "0.51949096", "0.5192662", "0.519087", "0.5156624", "0.5156624", "0.51537174", "0.51536757", "0.51419264", "0.51350445", "0.51350445", "0.51272947", "0.5105007" ]
0.7096408
0
Creates a new kernel and returns the ID
def create_kernel(name: str) -> str: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_kernel(self, kernel_id):", "def testCreateKernel(self):\n try:\n contextID, retErr = PyOpenCLInterface.CreateContext(self.testResources.listDevicesIDs, self.testResources.dictProperties)\n self.assertEqual(retErr, 0)\n # create program\n programID, retErr = PyOpenCLInterface.CreateProgram(contextID, self.testResources.programCodeStrings)\n self.assertEqual(retErr, 0)\n buildOptions = \"\"\n retErr = PyOpenCLInterface.BuildProgram(programID, self.testResources.listDevicesIDs, buildOptions)\n self.assertEqual(retErr, 0)\n #create kernel\n kernelName = self.testResources.KernelFunctionName\n kernelID, retErr = PyOpenCLInterface.CreateKernel(programID, kernelName)\n self.assertEqual(retErr, 0)\n kernelProperties, retErr = PyOpenCLInterface.GetKernelProperties(kernelID)\n self.assertEqual(kernelProperties['Program'], programID)\n self.assertEqual(kernelProperties['id'], kernelID)\n self.assertEqual(kernelProperties['Context'], contextID)\n self.assertEqual(kernelProperties['KernelFunctionName'], kernelName)\n retErr = PyOpenCLInterface.ReleaseKernel(kernelID)\n self.assertEqual(retErr, 0)\n retErr = PyOpenCLInterface.ReleaseProgram(programID)\n self.assertEqual(retErr, 0)\n listPrograms = PyOpenCLInterface.ListPrograms()\n self.assertEqual(listPrograms, [])\n retErr = PyOpenCLInterface.ReleaseContext(contextID)\n self.assertEqual(retErr, 0)\n except:\n print \"Exception caught:\", sys.exc_info()[0]", "def create_device(name, device_type, runtime):\n command = 'create \"%s\" \"%s\" \"%s\"' % (\n name, device_type.identifier, runtime.identifier)\n device_id = _run_command(command)\n\n # The device ID has a new line at the end. Strip it when returning.\n return device_id[:-1]", "def create_session(\n path: str,\n type: str,\n name: Optional[str] = None,\n kernel_name: Optional[str] = None,\n kernel_id: Optional[str] = None,\n) -> str:\n ...", "def LUCID_create(lucid_kernel=None, blur_kernel=None): # real signature unknown; restored from __doc__\n pass", "def start_kernel(self, kernel_name=None, **kwargs):", "def kernel_model(self, kernel_id):\n self._check_kernel_id(kernel_id)\n kernel = self._kernels[kernel_id]\n\n model = {\n \"id\":kernel_id,\n \"name\": kernel.kernel_name,\n \"last_activity\": isoformat(kernel.last_activity),\n \"execution_state\": kernel.execution_state,\n \"connections\": self._kernel_connections[kernel_id],\n }\n return model", "def create_kernel(ktype='sph-anarchy'):\n \n kernel = get_kernel(ktype)\n header = np.array([{'kernel': ktype, 'bins': kernsize}])\n np.savez('kernel_{}.npz'.format(ktype), header=header, kernel=kernel)\n \n print (header)\n \n return kernel", "async def _initialize(self, kernel_name, kernel_id_future):\n kernel_id = await kernel_id_future\n extension = None\n language = None\n\n kernel = self.get_kernel(kernel_id)\n\n try:\n language_to_extensions = {\"python\": \"py\"}\n language = kernel.kernel_spec_manager.get_all_specs()[kernel_name][\"spec\"][\"language\"]\n extension = language_to_extensions[language]\n except Exception:\n pass\n\n py_imports = language == \"python\" and self.python_imports\n\n config_code = self.initialization_code.get(kernel_name)\n\n if not extension and not py_imports and not config_code:\n # Save some effort\n return kernel_id\n\n self.log.info(\"Initializing kernel: %s\", kernel_id)\n\n client = ExecClient(kernel)\n\n from jupyter_core.paths import jupyter_config_path\n from pathlib import Path\n\n async with client.setup_kernel():\n if py_imports:\n code = 
python_init_import_code.format(modules=self.python_imports)\n await client.execute(code)\n if config_code:\n await client.execute(config_code)\n if extension:\n for base_path in map(Path, jupyter_config_path()):\n path = base_path / f\"kernel_pool_init_{kernel_name}.{extension}\"\n if path.exists():\n with open(path) as f:\n self.log.debug(\"Running %s for initializing kernel\", path)\n code = f.read()\n await client.execute(code)\n self.log.debug(\"Initialized kernel: %s\", kernel_id)\n return kernel_id", "def _launch_kernel(self, kernel_cmd, **kw):", "def _setup_kernel(self, program, kernel_name, *argv):\n kernel = cl.Kernel(program, kernel_name)\n for idx, value in enumerate(argv):\n kernel.set_arg(idx, value)\n\n return kernel", "def define_kernel(self, *args, **kwargs):\n k = getattr(kernels, self.kernel_name)\n k_base = getattr(kernels, self.base_kernel)\n\n kernel = k(base_graph_kernel=k_base, *args, **kwargs)\n\n return kernel", "def cuda_kernel_factory(nvrtc_kernel_str, dtypes, kernel_name=None):\n\n dtype_strs = get_dtype_strs(dtypes)\n\n for idx, dtype in enumerate(dtypes):\n nvrtc_kernel_str = nvrtc_kernel_str.replace(\n \"{%d}\" % idx, dtype_strs[idx]\n )\n\n kernel_name = f\"\"\"{uuid1()\n if kernel_name is None\n else kernel_name}_{\n \"\".join(dtype_strs).replace(\" \", \"_\")\n }\"\"\"\n\n nvrtc_kernel_str = \"%s\\nvoid %s%s\" % (\n extern_prefix,\n kernel_name,\n nvrtc_kernel_str,\n )\n\n if logger.should_log_for(logger.level_debug):\n logger.debug(str(nvrtc_kernel_str))\n\n return cp.RawKernel(nvrtc_kernel_str, kernel_name)", "def _newClusterId(self):\n return self.guidGenerator.new_id()", "def remove_kernel(self, kernel_id):", "def get_new_oid(cls):\n return OidGenerator.allocate()", "def create_program(template, func, loc=None):\n\n k_args = []\n\n func.set_cl_kernel_args()\n k_args.extend(func.cl_args_name)\n\n # Build the kernel args string.\n kernel_args = ',\\n '.join(k_args)\n \n # Get the kernel workgroup code\n workgroup_code = func.get_cl_workgroup_code()\n \n # Construct the neighbor loop code.\n neighbor_loop_code = \"for (int src_id=0; src_id<nbrs; ++src_id)\"\n\n return template%(locals())", "def allocate_osd_id(\n cluster,\n fsid,\n keyring,\n ):\n\n LOG.debug('Allocating OSD id...')\n try:\n osd_id = _check_output(\n args=[\n 'ceph',\n '--cluster', cluster,\n '--name', 'client.bootstrap-osd',\n '--keyring', keyring,\n 'osd', 'create', '--concise',\n fsid,\n ],\n )\n except subprocess.CalledProcessError as e:\n raise Error('ceph osd create failed', e, e.output)\n osd_id = must_be_one_line(osd_id)\n check_osd_id(osd_id)\n return osd_id", "def mol_kern_factory(kernel_type: str, *args, **kwargs):\n kernel_to_kernel_type = {\n MolGraphKernel: MOL_GRAPH_CONT_KERNEL_TYPES + MOL_GRAPH_INT_KERNEL_TYPES,\n MolFingerprintKernel: MOL_FINGERPRINT_KERNEL_TYPES,\n MolDistanceKernel: MOL_DISTANCE_KERNEL_TYPES,\n MolSimilarityKernel: MOL_SIMILARITY_KERNEL_TYPES\n }\n kernel_type_to_kernel = {\n kernel_type: kernel\n for kernel, kernel_type_list in kernel_to_kernel_type.items()\n for kernel_type in kernel_type_list\n }\n if kernel_type not in kernel_type_to_kernel:\n raise ValueError(\"Not recognized kernel type: {}\".format(kernel_type))\n kernel = kernel_type_to_kernel[kernel_type]\n return kernel(kernel_type, *args, **kwargs)", "async def init_new_kernel_configuration(self, request, image_id):\n\n try:\n self._image = Image(image_id=image_id)\n except ImageDoesNotExist:\n request.ret_error(IMAGE_MISSING)\n\n request.ret(READY)", "def ker_class():\n ker = Kernel()\n 
return ker", "def kernel_metadata(self, cfg):\n # Pushing a kernel through the API fails if len(title) > 50. (b/120288024)\n title = self.title\n dev = cfg.get('development', False)\n return dict(\n id=self.slug,\n language='python',\n is_private=not cfg.get('public', not dev),\n # Path is relative to where kernel-metadata.json file will be written, which is\n # notebooks/<track>/<cfg-tag>/kernels_api_metadata/<notebook-identifier>/kernel-metadata.json\n code_file=\"../../rendered/{}\".format(self.filename),\n enable_gpu=self.enable_gpu,\n # Enable internet in development mode so we can pip install learntools\n # TODO: Actually, probably only needs to be turned on if we're in\n # development mode AND this is an exercise kernel.\n enable_internet=dev if self.enable_internet is None else self.enable_internet,\n kernel_type='notebook',\n title=title,\n dataset_sources=sorted(self.dataset_sources),\n competition_sources=sorted(self.competition_sources),\n kernel_sources=sorted(self.kernel_sources),\n keywords=sorted(self.keywords),\n docker_image_pinning_type=\"latest\",\n )", "def create(self):\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']", "def kernel(self, kernel):\n self._context[\"kernel\"] = kernel", "def restart_kernel(self, kernel_id, now=False):", "def intel_run(kernel_call, kernel_def, kernel='autosa.tmp/output/src/kernel_autosa_opencl.cpp'):\n\n # Load kernel call file\n module_calls = []\n fifo_decls = []\n with open(kernel_call, 'r') as f:\n add = False\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the fifo declaration and add to the list\n if add:\n line = line.strip()\n fifo_decls.append(line)\n if line.find('/* FIFO Declaration */') != -1:\n if add:\n fifo_decls.pop(len(fifo_decls) - 1)\n add = not add\n\n with open(kernel_call, 'r') as f:\n add = False\n module_call = []\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the module call and add to the list\n if add:\n line = line.strip()\n module_call.append(line)\n if line.find('/* Module Call */') != -1:\n if add:\n module_call.pop(len(module_call) - 1)\n module_calls.append(module_call.copy())\n module_call.clear()\n add = not add\n\n module_defs = {}\n headers = []\n with open(kernel_def, 'r') as f:\n while True:\n line = f.readline()\n if not line:\n break\n if line.find('#include') != -1:\n line = line.strip()\n headers.append(line)\n\n with open(kernel_def, 'r') as f:\n add = False\n module_def = []\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the module definition and add to the dict\n if add:\n module_def.append(line)\n # Extract the module name\n if (line.find('__kernel')) != -1:\n m = re.search('void (.+?)\\(', line)\n if m:\n module_name = m.group(1)\n if line.find('/* Module Definition */') != -1:\n if add:\n module_def.pop(len(module_def) - 1)\n module_defs[module_name] = module_def.copy()\n module_def.clear()\n add = not add\n\n # compose the kernel file\n kernel = str(kernel)\n generate_intel_kernel(kernel, headers, module_defs, module_calls, fifo_decls)", "def run_kernel(\n environment='emulator',\n timeout=60):\n block_file = OBJ_DIR + 'fsimage.bin'\n subprocess.check_output([BIN_DIR + 'mkfs', block_file, ELF_FILE],\n stderr=subprocess.STDOUT)\n\n return run_program(environment=environment, block_device=block_file,\n timeout=timeout, executable=PROJECT_TOP + '/software/kernel/kernel.hex')", "def _initialize_kernel(input_dim: int,\n kernel: str = 'RBF',\n 
use_single_gp: bool = False) -> GenericKernel:\n if kernel == 'RBF':\n return RBFKernel(input_dim, use_single_gp)\n elif kernel == 'Matern52':\n return Matern52Kernel(input_dim, use_single_gp)\n elif kernel == 'Matern32':\n return Matern32Kernel(input_dim, use_single_gp)\n elif kernel == 'RationalQuadratic':\n return RationalQuadraticKernel(\n input_dim=input_dim, use_single_gp=use_single_gp)\n elif kernel == 'Sigmoid':\n return SigmoidKernel(input_dim, use_single_gp)\n else:\n sys.exit(\"Error: specified Gaussian Process kernel not valid\")", "def create_tag_id():\n return uuid.uuid1().int", "def define_kernel(self, *args, **kwargs):\n base_kernel = self.base_kernel\n if base_kernel is None:\n base_kernel = 'VertexHistogram'\n k = getattr(kernels, self.kernel_name)\n k_base = getattr(kernels, base_kernel)\n kernel = k(base_graph_kernel=k_base, *args, **kwargs)\n return kernel" ]
[ "0.6998178", "0.6430162", "0.59596896", "0.59022087", "0.58619636", "0.58563536", "0.5824651", "0.5798457", "0.5699474", "0.56958765", "0.5624578", "0.5619856", "0.560809", "0.5569131", "0.5542638", "0.5518145", "0.5472115", "0.5447868", "0.5417481", "0.54118687", "0.5337183", "0.5333342", "0.5293815", "0.5230644", "0.5209176", "0.5197264", "0.5192696", "0.51854235", "0.5174386", "0.5173899" ]
0.7782273
0