query: stringlengths (9 to 9.05k)
document: stringlengths (10 to 222k)
metadata: dict
negatives: listlengths (30 to 30)
negative_scores: listlengths (30 to 30)
document_score: stringlengths (4 to 10)
document_rank: stringclasses (2 values)
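Each row below is a single retrieval example, and its metadata declares a (query, document, negatives) triplet objective. A minimal sketch, assuming this dump corresponds to a Hugging Face style dataset (the dataset name used here is a placeholder), of how rows with this schema could be expanded into training triplets:

from datasets import load_dataset

# "user/code-retrieval-triplets" is a placeholder name, not the actual dataset id
ds = load_dataset("user/code-retrieval-triplets", split="train")

triplets = []
for row in ds:
    # each row pairs one query with one positive document and 30 ranked negatives
    for neg in row["negatives"]:
        triplets.append((row["query"], row["document"], neg))

print(len(triplets))  # 30 triplets per row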
Reads a file until it finds a 'header finalization' line. By standard, such a line is '-----\n' (five dashes followed by a newline).
def skip_file_header(file_pointer, header_end="-----\n"):
    # Reads file line once
    line = file_pointer.readline()
    while line:  # Reads until eof
        # Checks if the line is a header ender
        if line == header_end:
            return
        line = file_pointer.readline()
    # If EOF is reached without finding the header end, an error is raised.
    raise EOFError("Hey, I did not find the header ending string on file:\n"
                   "File: '{}'\n"
                   "Ending str:'{}'\n".format(file_pointer.name, header_end))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def skip_gutenberg_header(fp):\n for line in fp:\n if line.startswith('*END*THE SMALL PRINT!'):\n break", "def skip_gutenberg_header(fp):\n for line in fp:\n if line.startswith(''):\n break", "def skip_gutenberg_header(self, fp):\n for line in fp:\n if line.startswith('*END*THE SMALL PRINT!'):\n break", "def _read_until_end_of_comments(self, fileobj):\n offset = fileobj.tell()\n line = fileobj.readline()\n if not line:\n raise EOFError(\"Read until EOF\")\n\n line = line.strip()\n if line.startswith(\"#\"):\n return self._read_until_end_of_comments(fileobj)\n\n fileobj.seek(offset)", "def skip_gutenberg_header(fp):\n for line in fp:\n if line.startswith('*** START OF THIS PROJECT'):\n break", "def _header_line_generator(file_name):\n with FileSystems.open(file_name) as f:\n record = None\n while True:\n record = f.readline().decode('utf-8')\n while record and not record.strip(): # Skip empty lines.\n record = f.readline().decode('utf-8')\n if record and record.startswith('#'):\n yield record\n else:\n break\n yield record", "def check_file_header(fnames, nlines=5):\n from itertools import islice\n for fname in fnames:\n print(f\"\\nPrinting header from {fname} \\n#########################################\")\n with open(fname) as f:\n head = list(islice(f, nlines))\n for line in head:\n print(line)", "def _readheaderlines(f):\n hdrlines = []\n for i in range(0,26):\n hdrlines.append(f.readline())\n return hdrlines", "def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def header_len(fname,header_char='#'):\n Nheader = 0\n with open(fname) as f:\n for i, l in enumerate(f):\n if ( (l[0:len(header_char)]==header_char) or (l==\"\\n\") ):\n Nheader += 1\n else:\n break\n\n return Nheader", "def _read_file_definition(self):\n row_count = 0\n #\n # THIS METHOD ASSUMES A 14 ROW HEADER\n # If the number of header row lines in the glider ASCII input file changes from 14,\n # this method will NOT WORK\n num_hdr_lines = 14\n\n header_pattern = r'(.*): (.*)$'\n header_re = re.compile(header_pattern)\n\n line = self._stream_handle.readline()\n\n while line and row_count < num_hdr_lines:\n\n match = header_re.match(line)\n\n if match:\n key = 
match.group(1)\n value = match.group(2)\n value = value.strip()\n\n # update num_hdr_lines based on the header info.\n if key == 'num_ascii_tags':\n # this key has a required value of 14, otherwise we don't know how to parse the file\n if int(value) != num_hdr_lines:\n raise DatasetParserException(\"Header must be %d rows, but it is %s\" % (num_hdr_lines, value))\n\n elif key == 'num_label_lines':\n # this key has a required value of 3, otherwise we don't know how to parse the file\n if int(value) != 3:\n raise DatasetParserException(\"There must be 3 Label lines from the header for this parser\")\n\n elif key == 'sensors_per_cycle':\n # save for future use\n self._header_dict[key] = int(value)\n\n elif key in ['filename_label', 'mission_name', 'fileopen_time']:\n # create a dictionary of these 3 key/value pairs strings from\n # the header rows that need to be saved for future use\n self._header_dict[key] = value\n\n else:\n log.warn(\"Failed to parse header row: %s.\", line)\n\n row_count += 1\n # only read the header lines in this method so make sure we stop\n if row_count < num_hdr_lines:\n line = self._stream_handle.readline()\n\n if row_count < num_hdr_lines:\n log.error('Not enough data lines for a full header')\n raise DatasetParserException('Not enough data lines for a full header')", "def get_header(fname, Nrows_header_total=None):\n\n if Nrows_header_total==None:\n Nrows_header_total = header_len(fname)\n\n output = []\n with open(fname) as f:\n for i in range(Nrows_header_total):\n line = f.readline().strip()\n output.append(line)\n\n return output", "def get_header(file):\n buffer=''\n for line in open(file).readlines():\n if line[0]=='#': buffer=buffer+line\n else: break\n return buffer", "def get_next_hundered_lines(file):\n count = 0\n result = []\n while count < 100:\n count += 1\n next_line = file.readline()\n if next_line != \"\":\n result.append(next_line)\n else:\n break\n return result", "def read_data_file(input_file):\n header_lines = 0\n last_pound_pos = -1\n with open(input_file, 'r') as data_file:\n while (data_file.read(1) == '#'):\n last_pound_pos = data_file.tell()\n header = data_file.readline()\n header_lines += 1\n\n #Read the next lines\n data_1 = data_file.readline().split()\n data_2 = data_file.readline().split()\n data_file.seek(last_pound_pos + 1) #Goes back to the last line of the header\n\n if header_lines == 0:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n\n else:\n # Single line file\n if len(data_2) == 0:\n data_file.readline()\n\n else:\n\n if len(data_1) != len(\n data_2): #If there is a line containing the number of particles,\n data_file.readline()\n data_file.readline()\n\n try:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n data.columns = header.split()\n except:\n raise Exception(\"The input file '%s' is corrupted, usually the problem is because \"\\\n \"there is an end of a line that has an additional space\" %input_file)\n\n return data", "def _parse_header(self):\n # read the first bytes from the file\n header = self._stream_handle.read(HEADER_BYTES)\n match = HEADER_MATCHER.match(header)\n if not match:\n raise SampleException(\"File header does not match the header regex\")\n\n # update the state to show we have read the header\n self._increment_state(HEADER_BYTES)", "def read_hdr_file(self, rawfilename):\n\n # Get the filename without path or extension\n filename = os.path.basename(rawfilename)\n filesplit = os.path.splitext(filename)\n filebase = filesplit[0]\n 
dirname = os.path.dirname(rawfilename)\n\n # See if we can find the header file to use\n if os.path.isfile(os.path.join(dirname, filebase + '.hdr')):\n hdrfilename = os.path.join(dirname, filebase + '.hdr')\n elif os.path.isfile(os.path.join(dirname, filename + '.hdr')):\n hdrfilename = os.path.join(dirname, filename + '.hdr')\n else:\n raise IOError('Could not find coresponding header file')\n\n hdrfile = open(hdrfilename, 'r')\n output = collections.OrderedDict()\n inblock = False\n\n # Read line, split it on equals, strip whitespace from resulting strings\n # and add key/value pair to output\n for currentline in hdrfile:\n # ENVI headers accept blocks bracketed by curly braces - check for these\n if not inblock:\n # Split line on first equals sign\n if re.search('=', currentline) is not None:\n linesplit = re.split('=', currentline, 1)\n # Convert all values to lower case\n key = linesplit[0].strip().lower()\n value = linesplit[1].strip()\n\n # If value starts with an open brace, it's the start of a block\n # - strip the brace off and read the rest of the block\n if re.match('{', value) is not None:\n inblock = True\n value = re.sub('^{', '', value, 1)\n\n # If value ends with a close brace it's the end\n # of the block as well - strip the brace off\n if re.search('}$', value):\n inblock = False\n value = re.sub('}$', '', value, 1)\n value = value.strip()\n output[key] = value\n else:\n # If we're in a block, just read the line, strip whitespace\n # (and any closing brace ending the block) and add the whole thing\n value = currentline.strip()\n if re.search('}$', value):\n inblock = False\n value = re.sub('}$', '', value, 1)\n value = value.strip()\n output[key] = output[key] + value\n\n hdrfile.close()\n\n return output", "def _header_transformer(self, lines):\n needle = b'--%s\\n' % self.boundary\n in_header = False\n for line in lines:\n if line == needle:\n in_header = True\n if in_header:\n assert line[-1] == b'\\n'\n line = line[:-1] + b'\\r\\n'\n if line == b'\\r\\n':\n in_header = False\n yield line", "def read_header(fname):\n with gzip.open(fname, 'rt') as f:\n content = f.readline().split()\n return content[:-1], int(content[-1])", "def get_header(file):\n with open(file, 'r') as f:\n return f.readline()", "def file_fzp_start(filename):\n\n with open(filename) as in_f:\n c= 0\n cols = []\n #find start of VISSIM data\n line = in_f.readline()\n while 'VehNr;' not in line:\n line = in_f.readline()\n cols = [x.strip() for x in line.split(';')][:-1]\n c +=1\n\n return {'lines_to_skip' : c, 'header_cols' : cols}", "def process_file(filename, skip_header):\n hist = {}\n fp = open(filename)\n\n if skip_header:\n skip_gutenberg_header(fp)\n\n for line in fp:\n if line.startswith(''):\n break\n\n process_line(line, hist)\n\n return hist", "def get_next_line(fin):\n line = fin.readline()\n\n pos = line.find(\"#\")\n\n while (pos == 0 or line.strip() == \"\") and line:\n line = fin.readline()\n pos = line.find(\"#\")\n\n if pos == -1:\n return line.strip()\n return line[:pos]", "def _read_until_line_startswith(self, fileobj, prefix):\n line = fileobj.readline()\n if not line:\n raise EOFError(\"Read until EOF, no line with prefix {0}\".format(prefix))\n\n line = line.strip()\n if line.startswith(prefix):\n return line\n\n return self._read_until_line_startswith(fileobj, prefix)", "def header_len(self):\n if self.num_lines_header is None:\n Nheader = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] == 
self.header_char) or (\n l == \"\\n\"\n ):\n Nheader += 1\n else:\n break\n\n return Nheader\n else:\n return self.num_lines_header", "def head(filename, lines=5):\n from itertools import islice\n with open(filename, \"r\") as f:\n return list(islice(f, lines))", "def read_header(file_path):\n with open(file_path, 'r') as f:\n header = f.readline()\n return header.strip()", "def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding='utf8') as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n else:\n for line in f:\n if nb_lines == 0:\n break\n print(line, end=\"\")\n nb_lines -= 1", "def test_is_fasta_header(self):\r\n\r\n is_fasta_header = False\r\n\r\n with open(full_file_name, \"r\") as in_file:\r\n for line in in_file:\r\n is_fasta_header = mfau.is_header_line(line)\r\n\r\n # only testing the first line\r\n break\r\n\r\n self.assertEqual(is_fasta_header, True)" ]
[ "0.6732128", "0.6703168", "0.65750206", "0.6532136", "0.64251715", "0.6425103", "0.64053565", "0.6342697", "0.61418253", "0.6105702", "0.59541345", "0.5769603", "0.5761424", "0.5737028", "0.5736275", "0.57220525", "0.57138824", "0.5702146", "0.56965077", "0.56569594", "0.56490856", "0.56395715", "0.56219906", "0.5609777", "0.55635124", "0.5554045", "0.55419517", "0.55312103", "0.5512191", "0.55107594" ]
0.7243702
0
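An illustrative usage sketch of the document above (not part of the dataset row); it assumes the skip_file_header function is in scope and exercises it on a throwaway file whose header ends with the '-----' line:

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "sample.txt")
with open(path, "w") as f:
    f.write("title: example\nauthor: someone\n-----\nfirst data line\n")

with open(path) as fp:
    skip_file_header(fp)            # consumes lines up to and including '-----\n'
    print(fp.readline().rstrip())   # -> first data line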
Tries to read the output file name from argv[argn]. If argv[argn] does not exist, the output file is set to std_name, whose standard value is 'output.txt'.
def get_output_file_name(argn=2, std_name='output.txt'):
    try:
        name = sys.argv[argn]
    except IndexError:
        name = std_name
        print("Warning: no output file name received. Output will be"
              " written to '%s'." % name)
    return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_output_file():\n if len(sys.argv) < 4:\n return -1\n return sys.argv[3]", "def output_file_name_maker(args):\n log.debug(\"Entering output_file_name_maker()\")\n path = os.getcwd() + '/out_files/'\n if not os.path.isdir(path):\n os.mkdir(path)\n\n if args.output is None:\n out_file_name = path + args.input[:-4] + '_' + args.type + '_' + args.layer\n else:\n out_file_name = path + args.output\n\n log.debug(\"Exiting output_file_name_maker()\")\n return out_file_name", "def get_output_file(run, lens_chunk, source_tilename):\n d=get_output_dir(run, lens_chunk)\n fname=\"%(run)s-lens-%(lens_chunk)06d-src-%(source_tilename)s.dat\"\n fname=fname % {'run':run,\n 'lens_chunk':lens_chunk,\n 'source_tilename':source_tilename}\n\n return os.path.join(d, fname)", "def GetOutputFilename(fname):\n return os.path.join(outdir, fname)", "def initialize_output(fn, output_dir, station_id, dataset):\n \n source_file =fn.split('/')[-1]\n output_file = output_dir + '/' + station_id + '_' + dataset + '_harvested_' + source_file + '.nc' # creating an output file name e.g. chera5.conv._10393.nc , try 01009 faster\n return output_file , source_file", "def get_input_file():\n if len(sys.argv) < 3:\n return -1\n return sys.argv[2]", "def get_output_raw_name(journal_file_name, output_type='txt'):\n dot_pos = journal_file_name.rfind('.')\n if dot_pos != -1:\n output_file_name = journal_file_name[0: dot_pos]\n else:\n output_file_name = journal_file_name\n num_of_output = 1\n if output_type == 'txt':\n while True:\n output_file = '%s_%d.txt'%(output_file_name,num_of_output)\n if not os.path.exists(output_file):\n break\n else:\n num_of_output += 1\n else:\n output_file = '%s.%s'%(output_file_name,output_type)\n return output_file", "def AssignOutputFilename(self, output_file):\n if output_file is None:\n gmt = time.gmtime()\n ts = calendar.timegm(gmt)\n return f\"{self.input_file}.{ts}\"\n return output_file", "def parse_args(args):\n\tif len(args) == 3:\n\t\toutput_file_name = args[2]\n\telse:\n\t\toutput_file_name = \"FairAndSquare.out\"\n\t\n\ttry:\n\t\tinput_file = open(args[1], 'r')\n\texcept:\n\t\tprint \"Cannot open file {}\".format(args[1])\n\t\texit(1)\n\t\n\ttry:\n\t\toutput_file = open(output_file_name, 'w')\n\texcept:\n\t\tinput_file.close()\n\t\tprint \"Cannot open file {}\".format(output_file_name)\n\t\texit(1)\n\t\n\treturn input_file, output_file", "def open_outfile(outfilename, force):\n if outfilename == \"-\":\n return sys.stdout\n else:\n if not force and os.path.exists(outfilename):\n sys.stderr.write(\"Output text file, '%s', exists\\n\" % outfilename)\n if (input(\"Overwrite? 
\")).lower not in (\"y\", \"yes\"):\n return None\n return open(outfilename, \"w\")", "def output_file_from_option(option, open_args):\n if option.output == \":stdout\":\n return sys.stdout\n elif option.output == \":stderr\":\n return sys.stderr\n else:\n # TODO: try to do something intellegent here to avoid/warn when overwriting files?\n return open(option.output, open_args)", "def _build_output_file(self, output):\n\t\tif output is None:\n\t\t\tself.output_file_name = \"index.html\"\n\t\telse:\n\t\t\tself.output_file_name = output", "def call_files():\n try:\n predicted_proteins = sys.argv[1]\n except IndexError:\n predicted_proteins = input('Please input AUGUSTUS file for analysis: ')\n try:\n protein_db = sys.argv[2]\n except IndexError:\n protein_db = input('Please input a protein database file: ')\n\n try:\n output_file_aug_to_fasta = sys.argv[3]\n output_to_file = True\n except IndexError:\n output_to_file = input('Write output to file?'\n + ' [Yes/No]: ')\n if output_to_file.upper() in 'YES':\n output_to_file = True\n output_file_aug_to_fasta = input('Please supply output file name '\n + 'for AUGUSTUS conversion to '\n + 'FASTA: ')\n else:\n output_to_file = False\n output_file_aug_to_fasta = None\n\n try:\n output_file_proteins_to_db = sys.argv[4]\n except IndexError:\n if output_to_file:\n output_file_proteins_to_db = input('Please supply output file name'\n + 'for blast database: ')\n else:\n output_file_proteins_to_db = None\n\n try:\n blastp_output = sys.argv[5]\n except IndexError:\n if output_to_file:\n blastp_output = input('Please supply output file name for blastp: ')\n else:\n blastp_output = None\n\n finally:\n if len(sys.argv) >= 7:\n overwrite = sys.argv[6]\n elif output_file and os.path.exists(output_file):\n overwrite = input('Output file already exists. Overwrite? 
'\n + '[Yes/No]: ')\n if overwrite.upper() in 'YES':\n overwrite = True\n else:\n overwrite = False\n else: overwrite = False\n\n return (predicted_proteins, protein_db, output_file_aug_to_fasta, \n output_file_proteins_to_db, blastp_output, \n output_to_file, overwrite)", "def load_job_output(output_title, output_summary, output):\n def read_if_file(val):\n if os.path.exists(val):\n logger.info(\"Reading file: %s\", val)\n with open(val, \"r\") as inf:\n return inf.read()\n else:\n return val\n\n if output_title:\n assert output_summary\n return checks.Output(\n title = output_title,\n summary = read_if_file(output_summary),\n text = read_if_file(output) if output else None\n )\n else:\n return None", "def get_output_file(path):\n root, _ = os.path.splitext(path)\n return os.path.basename(root) + get_task_number() + \".txt\"", "def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()", "def _get_output_filename(dataset_dir, split_name):\n return '%s/fer_%s.tfrecord' % (dataset_dir, split_name)", "def read_inputs(argn=1):\n if len(sys.argv) < argn+1:\n raise IOError(\"Hey, no input file was passed as argument to\"\n \" the program!!\")\n if not os.path.exists(sys.argv[argn]):\n raise FileNotFoundError(\"Input file '{}' not found.\".\n format(sys.argv[argn]))\n return read_config_file(sys.argv[argn], attribution_char='=')", "def _get_output_filename(dataset_dir, split_name):\n return '%s/%s*.tfrecord' % (dataset_dir, split_name)", "def open_output(name=None):\n return Output(name)", "def try_process_output_file(ins_file, output_file=None):\n if output_file is None:\n output_file = ins_file.replace(\".ins\", \"\")\n df = None\n i = InstructionFile(ins_file)\n try:\n df = i.read_output_file(output_file)\n except Exception as e:\n print(\"error processing instruction/output file pair: {0}\".format(str(e)))\n return df", "def main():\n outfile = 'result.txt'\n\n if os.path.exists(outfile):\n os.remove(outfile)\n\n for arg in sys.argv[1:]:\n get_info(arg, outfile)", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def init_datafile(output_settings, instrument):\r\n # find the current directory\r\n target_dir = '/'.join([output_settings['folder'], output_settings['cruise'], 'Data/Acrobat/RAW',instrument['name']])\r\n print_spacer()\r\n \r\n # make a new file\r\n print 'Creating new file....'\r\n # find the current time and write as string\r\n current_time = time.strftime(\"%Y-%m-%dT%H%MZ\", time.gmtime(time.time()))\r\n # give it a name\r\n filestr = current_time+'_'+output_settings['cruise']+instrument['name']+'RAW.dat' # concatenates strings using \r\n # print target_dir+'/'+filestr\r\n # now set the target \r\n targetstr = '/'.join([target_dir,filestr]) # concatencates strings using join\r\n print targetstr\r\n # open the text file\r\n fs = open(targetstr, 'w')\r\n return fs # return the file id\r", "def set_default_output_settings(args):\n # TODO: shouldn't be setting args.X here as a side effect!\n stem_name, _ = os.path.splitext(os.path.basename(args.input))\n input_dir = helpr.get_directory(args.input)\n # Set default output format if there is an output filename 
specified\n if args.output:\n args.output = helpr.cleanup_filepath(args.output)\n if not args.outputFormat:\n args.outputFormat = os.path.splitext(args.output)[1][1:]\n log.info(\"You didn't specify an output format, \"\n \"assuming from output filename that it is %s\", args.outputFormat)\n # Set default output filename if not already done\n else:\n # Hmm default hidden here, not good\n if not args.outputFormat:\n args.outputFormat = printer_opts_checked[args.printer].default_output_fmt\n log.info(\"You didn't specify an output format, defaulted to %s\", args.outputFormat)\n filename = \"\".join([stem_name, \"_\", str(args.eventNumber), \".\", args.outputFormat])\n args.output = os.path.join(input_dir, filename)\n log.info(\"You didn't specify an output filename, setting it to %s\", args.output)", "def wsRenameOutput(self, nj):\n\n txt = '\\n#Written by cms_cmssw::wsRenameOutput\\n'\n txt += 'echo \">>> current directory $PWD\" \\n'\n txt += 'echo \">>> (SOFTWARE_DIR): $SOFTWARE_DIR\" \\n'\n txt += 'echo \">>> (WORKING_DIR): $WORKING_DIR\" \\n'\n txt += 'echo \">>> current directory content:\"\\n'\n #if self.debug_wrapper==1:\n txt += 'ls -Al\\n'\n txt += '\\n'\n\n for fileWithSuffix in (self.output_file):\n output_file_num = numberFile(fileWithSuffix, '$OutUniqueID')\n txt += '\\n'\n txt += '# check output file\\n'\n txt += 'if [ -e ./'+fileWithSuffix+' ] ; then\\n'\n if (self.copy_data == 1): # For OSG nodes, file is in $WORKING_DIR, should not be moved to $RUNTIME_AREA\n txt += ' mv '+fileWithSuffix+' '+output_file_num+'\\n'\n txt += ' ln -s `pwd`/'+output_file_num+' $RUNTIME_AREA/'+fileWithSuffix+'\\n'\n else:\n txt += ' mv '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\\n'\n txt += ' ln -s $RUNTIME_AREA/'+output_file_num+' $RUNTIME_AREA/'+fileWithSuffix+'\\n'\n txt += 'else\\n'\n txt += ' job_exit_code=60302\\n'\n txt += ' echo \"WARNING: Output file '+fileWithSuffix+' not found\"\\n'\n if common.scheduler.name().upper() == 'CONDOR_G':\n txt += ' if [ $middleware == OSG ]; then \\n'\n txt += ' echo \"prepare dummy output file\"\\n'\n txt += ' echo \"Processing of job output failed\" > $RUNTIME_AREA/'+output_file_num+'\\n'\n txt += ' fi \\n'\n txt += 'fi\\n'\n file_list = []\n for fileWithSuffix in (self.output_file):\n file_list.append(numberFile('$SOFTWARE_DIR/'+fileWithSuffix, '$OutUniqueID'))\n\n txt += 'file_list=\"'+string.join(file_list,',')+'\"\\n'\n txt += '\\n'\n txt += 'echo \">>> current directory $PWD\" \\n'\n txt += 'echo \">>> (SOFTWARE_DIR): $SOFTWARE_DIR\" \\n'\n txt += 'echo \">>> (WORKING_DIR): $WORKING_DIR\" \\n'\n txt += 'echo \">>> current directory content:\"\\n'\n #if self.debug_wrapper==1:\n txt += 'ls -Al\\n'\n txt += '\\n'\n txt += 'cd $RUNTIME_AREA\\n'\n txt += 'echo \">>> current directory (RUNTIME_AREA): $RUNTIME_AREA\"\\n'\n return txt", "def get_output_file_name(run_parameters, dir_name_key, prefix_string, suffix_string='', type_suffix='tsv'):\n output_file_name = os.path.join(run_parameters[dir_name_key], prefix_string + '_' +\n run_parameters['method'] + '_' + run_parameters[\"correlation_measure\"])\n\n output_file_name = kn.create_timestamped_filename(output_file_name) + '_' + suffix_string + '.' + type_suffix\n return output_file_name" ]
[ "0.68974876", "0.64077896", "0.6069774", "0.6004914", "0.59959024", "0.5963026", "0.58799326", "0.583073", "0.5725154", "0.56617206", "0.5534797", "0.55120677", "0.5463668", "0.5450547", "0.5433109", "0.5391978", "0.5383544", "0.5372698", "0.53699344", "0.5338002", "0.53314203", "0.53244805", "0.5317063", "0.5317063", "0.5317063", "0.5317063", "0.5284572", "0.5279807", "0.52756935", "0.5239931" ]
0.78180575
0
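An illustrative sketch of the document above; it assumes get_output_file_name and an import of sys are in scope, and fakes sys.argv to show both branches:

import sys

sys.argv = ["prog", "input.txt", "results.txt"]
print(get_output_file_name())   # -> results.txt (argv[2] is present)

sys.argv = ["prog", "input.txt"]
print(get_output_file_name())   # prints the warning, then -> output.txt (std_name fallback)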
Reads a folder path from argv (argv[2] by standard). Adds the separator character (/ or \\) if it was forgotten. Checks whether the folder exists and creates it if it does not. If the corresponding position in argv is not informed, asks the user for the folder path, starting from a given root folder.
def get_output_folder_name(argi=2, root_folder=""):
    # First tries to read the output folder name from argv[2]
    try:
        output_folder = sys.argv[argi]
    except IndexError:
        # If argv[argi] was not passed, asks the user for the output folder.
        output_folder = root_folder
        output_folder += input("Output folder path was not informed. Please inform:\n"
                               "{}".format(root_folder))
    # Adds the SEP (/ or \\) character to the end of the folder name.
    if output_folder[-len(SEP):] != SEP:
        output_folder += SEP
    # Checks if the folder does not exist. Creates it, in this case.
    if not os.path.exists(output_folder):
        os.system("mkdir -p '{}'".format(output_folder))
    return output_folder
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_folder():\n return input(\"Folder: \")", "def ensure_folder(*arg):\n if len(arg) == 0:\n raise Exception(\"No input to ensure_folder\")\n path = get_dir(Path(*arg))\n path.mkdir(parents=True, exist_ok=True)", "def prep_folder(args):\n if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)", "def _create_folder(self, unsupported_file: File) -> str:\n if not self.possibilities:\n print(\n f\"----\\nNo folders found in directory. Please enter directory name for \"\n f\"{unsupported_file} file:\\n\"\n )\n else:\n print(\"Please enter directory name:\")\n\n while True:\n folder_name = input()\n checker = [True if char.isalnum() else False for char in folder_name]\n if False not in checker and folder_name not in self.possibilities.keys():\n os.makedirs(folder_name)\n temp_folder = Folder(folder_name)\n self.folders.append(temp_folder)\n if unsupported_file.get_extension():\n temp_folder.files.append(PlaceHolderFile(unsupported_file.name))\n return folder_name\n else:\n print(\"Invalid input\")", "def folder(ctx,path):\n data = config.readData()\n fav=data.get('favorites',{})\n\n if path is None:\n displayMenu(data)\n path = click.prompt('Choose folder',default=\"\",show_default=False)\n if not path: ctx.abort()\n \n if path not in fav:\n click.echo(td(text=f\"'<r>{path}</r>' isn't in your favorites.\"))\n ctx.exit()\n\n if os.path.isdir(fav[path]):\n sub.run(f\"cd '{fav[path]}'; exec {data.get('shell','zsh')} \",shell=True,)\n else:\n click.echo(td(text=f\"Folder '<y>{fav[path]}</y>' doesn't exist.\"))", "def ask_folder(message=\"Select folder.\", title=None):\n return dialog(\"ask_folder\", message=message, title=title)", "def get_outfolder():\n \n valid = False\n while not valid:\n fname = raw_input(\"Please enter directory to save images. 
\")\n if not os.path.exists(fname):\n os.makedirs(fname)\n #Check to see if the file is there.\n if os.path.exists(fname): \n valid = True\n #File is not there, check to see if write privileges can be given\n #to created file.\n elif os.access(os.path.dirname(fname), os.W_OK):\n valid = True\n else:\n print \"Invalid local path, please try again.\"\n return fname", "def createFolder(self):\n folderName, ok = QtWidgets.QInputDialog.getText(self, 'Folder Name', 'Enter the folder name :',\n QtWidgets.QLineEdit.Normal)\n\n if ok:\n parent = self.fileDir\n currentPath = self.dataDir\n if self.fileDir.selectedItems():\n parent = self.fileDir.selectedItems()[-1]\n currentPath = str(parent.toolTip(0))\n\n if not os.path.isdir('%s/%s' % (currentPath, str(folderName))):\n item = QtWidgets.QTreeWidgetItem(parent)\n\n item.setText(0, str(folderName))\n item.setToolTip(0, '%s/%s' % (currentPath, str(folderName)))\n\n # connect icon\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap('%s/folder.png' % (self.iconsDir)), QtGui.QIcon.Normal,\n QtGui.QIcon.Off)\n item.setIcon(0, icon)\n\n # be careful about shiboken2, you can use 'is' and 'is not' instead of using operator '==' and '!='\n if parent is not self.fileDir:\n self.fileDir.setItemExpanded(parent, True)\n self.fileDir.setItemSelected(parent, False)\n\n self.fileDir.setItemSelected(item, True)\n\n os.makedirs('%s/%s' % (currentPath, str(folderName)))", "def folder_str(f):\n if not os.path.exists(f):\n raise argparse.ArgumentTypeError('\"%s\" does not exist, you must create this folder' % f)\n return f", "def create_folder(folder_path: List[str]) -> str:\n drive = _drive_gen()\n return _create_or_find_folder(folder_path, drive)", "def main(\n path: str,\n folder: str,\n head: bool,\n separator: str,\n remove_duplicates: bool,\n remove_non_folders: bool,\n remove_non_abs: bool,\n) -> None:\n new_path = common.add(\n path=path,\n folder=folder,\n head=head,\n separator=separator,\n remove_duplicates=remove_duplicates,\n remove_non_folders=remove_non_folders,\n remove_non_abs=remove_non_abs,\n )\n print(new_path)", "def get_path():\n\n path = input(\"Directory path: \")\n if os.path.isdir(path):\n return path\n else:\n raise(ValueError)", "def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def GetInputPath(self):\n self.inputDir = raw_input(\"Where should files be read from? This can be a file or a folder of files\\n\\r>>> \")\n if os.path.isabs(self.inputDir):\n if os.path.isdir(self.inputDir):\n self.isFolder = True\n self.inputDirs = os.listdir(self.inputDir)\n elif os.path.isfile(self.inputDir):\n self.isFolder = False\n self.inputDirs = [self.inputDir]\n else:\n print \"That path does not exist. Try again\"\n self.GetInputPath()\n else:\n print \"that was not an excepted path name. 
Try again.\"\n self.GetInputPath()", "def checkfolder(paths):\n\tpaths = paths if isinstance(paths, list) else [paths]\n\t\n\tdef creat_dir(x):\n\t\tx = Path(x)\n\t\tif x.is_dir():\n\t\t\tprint(f\"Dir {x} already exists\")\n\t\telse:\n\t\t\tPath.mkdir(x)\n\t\t\tprint(f\"Created new dir {x}\")\n\t\n\tlist(map(creat_dir, paths))", "def format_folder_path(folder_path):\n if folder_path[-1] != '/':\n folder_path += '/'\n\n return folder_path", "def entry_set_folder(self, entry):\r\n global folder_name\r\n folder_name = filedialog.askdirectory()\r\n entry.delete(0, 'end')\r\n entry.insert(tk.END, folder_name)", "def test_create_folder(self):\n test = Server()\n inputs = [['create_folder','oook'],['create_folder','oook']]\n response = ['folder created','Folder already exists. Try with another folder name']\n res = []\n for val in inputs:\n res.append(test.create_folder(val))\n self.assertListEqual(res, response)", "def initializeFolder(savePath,name,bOverWrite):\n if savePath==None:\n savePath=os.path.join('.','results')\n path=os.path.join(savePath,name)\n path_copy=deepcopy(path)\n if not bOverWrite:\n if os.path.exists(path_copy):\n fi=2\n poss_path=os.path.join(path_copy,'Run (%i)'%fi)\n while os.path.exists(poss_path):\n fi+=1\n poss_path=os.path.join(path_copy,'Run (%i)'%fi)\n path=poss_path\n \n os.makedirs(path)\n \n bexisted=True\n if not os.path.exists(path):\n os.makedirs(path)\n bexisted=False\n \n if path[-1]!=os.path.sep:\n path+=os.path.sep\n \n print \"Results will be saved in %s folder '%s'\"%('existing' if bexisted else 'new',path) \n return path", "def createFolders(self, *args):\n for folder in args:\n mkdir(folder)", "def createFolders(self, *args):\n for folder in args:\n mkdir(folder)", "def setup_part1(args):\n args.rootarg = args.root\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext == '':\n pass\n else:\n args.root = os.path.dirname(args.root)\n\n if args.root:\n args.root = os.path.abspath(args.root)\n if not os.path.isdir(args.root):\n if args.gallery:\n os.mkdir(args.root)\n else:\n error('Directory not found', args.root)", "def userSpecify():\n valid = False\n while valid != True:\n userPath = raw_input(\"\\nPlease specify directory path or press Enter key for the current directory: \").strip()\n if userPath == \"\":\n path = \".\"\n else:\n path = userPath\n\n if os.path.exists(path):\n print(\"Path has been validated\")\n valid = True\n else:\n print(\"Invalid File Path, File Doesn't Exist! 
Please try again.\")\n continue\n return path", "def get_path():\n\n output_path = None\n while output_path is None:\n print question + \"Please enter the directory where you would like the file saved?\"\n output_path = raw_input()\n if os.path.isdir(os.path.expanduser(output_path)):\n pass\n else:\n os.system('clear')\n print warn + \"%s is not valid, please try again: \" % str(output_path)\n output_path = None\n return os.path.expanduser(output_path)", "def create_folder(self, unformatted_path):\n os.makedirs(self.format_path(unformatted_path), exist_ok=True)", "def create_folder(client, parent_folder_id, folder_name):\n\n try:\n subfolder = client.folder(parent_folder_id).create_subfolder(folder_name)\n print(f'Created subfolder with ID {subfolder.id}')\n\n except Exception as e:\n print(f\"An error occurred: {e}\")", "def create_folder(path_folder, name_subfolder=None):\n if not name_subfolder:\n if not os.path.exists(path_folder):\n os.makedirs(path_folder)\n else:\n path_result_subolder = os.path.join(path_folder, name_subfolder)\n if not os.path.exists(path_result_subolder):\n os.makedirs(path_result_subolder)", "def ask_path(folder_flag=True, multiple_files_flag=False):\n # This method is almost never used, so the required imports are locally called\n import tkinter as tk\n from tkinter import filedialog\n\n root = tk.Tk()\n root.withdraw()\n path = os.getcwd()\n if folder_flag: # Open folder\n path = filedialog.askdirectory(parent=root, initialdir=path, title='Please select directory')\n else: # Open file\n if multiple_files_flag:\n path = filedialog.askopenfilenames(parent=root, initialdir=path, title='Please select data files')\n path = root.tk.splitlist(path)\n else:\n path = filedialog.askopenfilename(parent=root, initialdir=path, title='Please select data file')\n root.destroy()\n return path", "def select_folder(self):\r\n\r\n root = Tkinter.Tk()\r\n root.withdraw()\r\n diroption = {}\r\n diroption['initialdir'] = '.'\r\n diroption['mustexist'] = False\r\n diroption['parent'] = root\r\n diroption['title'] = 'Select a directory to organize'\r\n return tkFileDialog.askdirectory(**diroption)\r\n root.destroy()", "def enter_path():\n\n path_input=raw_input(\"Enter path (format- /path/to/folder): \")\n subs= raw_input(\"Do you want to search subdirectories (y/n): \").lower()\n filenames=list()\n if subs == \"y\":\n for root, dirs, files in os.walk(path_input):\n files=filter(lambda x: x.endswith(\".txt\"), files)\n #only files with .txt extension considered\n filenames+=[join(root,name) for name in files]\n for item in dirs:\n if item.startswith(\".\"):\n #hidden folders removed from dirs list\n dirs.remove(item)\n\n elif subs == \"n\":\n filenames = filter(lambda x: x.endswith(\".txt\"), [os.path.join(path_input,file_add) \\\n for file_add in next(os.walk(path_input))[2]])\n\n else:\n raise ValueError\n\n return filenames" ]
[ "0.63502616", "0.62756735", "0.6241278", "0.5936637", "0.5681377", "0.55883825", "0.5549787", "0.5519103", "0.5444538", "0.54012996", "0.53955", "0.53797764", "0.537001", "0.53520656", "0.53201383", "0.5316586", "0.5289567", "0.5267681", "0.5265646", "0.5260384", "0.5260384", "0.52422786", "0.52277446", "0.52184075", "0.5192227", "0.51858586", "0.5177045", "0.5152065", "0.5143433", "0.5139293" ]
0.6402549
0
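An illustrative sketch of the document above. The function relies on a module-level SEP constant plus os and sys imports that the row does not show; SEP = os.sep is an assumption about the author's intent, and the mkdir -p call limits this to POSIX shells:

import os
import sys

SEP = os.sep  # assumed definition of the separator constant used by the function

sys.argv = ["prog", "input.txt", "runs/experiment-01"]
folder = get_output_folder_name(argi=2)
print(folder)                 # -> runs/experiment-01/ (separator appended if missing)
print(os.path.isdir(folder))  # -> True (created via mkdir -p when absent)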
Reads multiple strings separated by commas and removes the surrounding (border) spaces from each.
def read_csv_names(string): return [remove_border_spaces(name) for name in string.split(',')]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_strip_strings_array(strings):\n string_array = strings.strip()\n string_array = string_array.split(',')\n result = []\n for string in string_array:\n string = string.strip()\n if string:\n result.append(string)\n return result", "def _clean_string(self, string):\n if string is None:\n return []\n str_list = string.strip().split(\",\")\n return [each.strip() for each in str_list]", "def clean_rows(reader):\n return [[a.strip() for a in row] for row in reader if row]", "def test_lstrip_whitespace(parallel, read_basic):\n text = \"\"\"\n 1, 2, \\t3\n A,\\t\\t B, C\n a, b, c\n \\n\"\"\"\n\n table = read_basic(text, delimiter=\",\", parallel=parallel)\n expected = Table([[\"A\", \"a\"], [\"B\", \"b\"], [\"C\", \"c\"]], names=(\"1\", \"2\", \"3\"))\n assert_table_equal(table, expected)", "def test_rstrip_whitespace(parallel, read_basic):\n text = \" 1 ,2 \\t,3 \\nA\\t,B ,C\\t \\t \\n \\ta ,b , c \\n\"\n table = read_basic(text, delimiter=\",\", parallel=parallel)\n expected = Table([[\"A\", \"a\"], [\"B\", \"b\"], [\"C\", \"c\"]], names=(\"1\", \"2\", \"3\"))\n assert_table_equal(table, expected)", "def test_get_items_from_string() -> None:\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, ,p\")\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i- -p\", separator=\"-\")\n assert [\"i\", \" \", \" p\"] == common_util.get_items_from_string(\"i, , p\", remove_blanks=False)\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, , p\")\n assert [] == common_util.get_items_from_string(\"\")", "def stringInputToList(x):\n return list(filter(None, [y.strip() for y in x.split(',')]))", "def clean_commas(song_list: List[str]) -> List[str]:\n res = []\n for idx, line in enumerate(song_list):\n if line[-1] == ',':\n if idx + 1 >= len(song_list) or song_list[idx + 1] == '':\n line = line[:-1]\n res.append(line)\n return res", "def parse_normalized(line):\n return line.strip().split(',')", "def parse(arr_str):\n return arr_str.rstrip().replace(' ', '').split(',')[:-1]", "def remove_empty_lines(self, string_list):\r\n string_list2 = []\r\n for strn in string_list:\r\n if strn:\r\n line = strn.strip()\r\n if line == \"\":\r\n continue\r\n else:\r\n string_list2.append(line)\r\n return string_list2", "def split_by_comma(s):\n return s.strip().split(\",\")", "def values(line):\n return [v.strip() or None for v in text(line).split(',')]", "def split(self, text):\n\n return [x.strip() for x in text.split(\",\")]", "def _clean(matches):\n # type: (List[str]) -> None\n while True:\n try:\n matches.remove(\"\")\n except ValueError:\n break\n\n while True:\n try:\n matches.remove(\",\")\n except ValueError:\n return", "def process(lines):\n lines = list(map(_clean, lines))\n # lines = list(map(_split, lines))\n return lines", "def split_choices(choices_string):\n return [x.strip() for x in choices_string.split(\",\") if x.strip()]", "def _split_mesy_list(string):\n init_list = [i.strip() for i in string.split(',') if i]\n final_list = []\n for i in init_list:\n if i.isspace():\n continue\n andlist = i.split('and')\n amplist = i.split('&')\n if len(andlist) > 1:\n for j in andlist:\n if not j or j.isspace():\n continue\n final_list.append(j.strip())\n elif len(amplist) > 1:\n for j in amplist:\n if not j or j.isspace():\n continue\n final_list.append(j.strip())\n else:\n final_list.append(i.strip())\n final_list = [i.strip() for i in final_list if not i.isspace()]\n return [i for i in final_list if i]", "def test_reading_empty_strings_for_different_types(self):\n 
self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE test_many_empty_strings (\n a text,\n b text,\n c text,\n d text,\n o uuid,\n i1 bigint,\n i2 bigint,\n t text,\n i3 bigint,\n PRIMARY KEY ((a, b, c, d), o)\n )\"\"\")\n\n tempfile = self.get_temp_file()\n with open(tempfile.name, 'w') as f:\n f.write(',,,a1,645e7d3c-aef7-4e3c-b834-24b792cf2e55,,,,r1\\n')\n\n def _test(prepared_statements):\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n cmds = \"COPY ks.test_many_empty_strings FROM '{}' WITH NULL='-' AND PREPAREDSTATEMENTS = {}\"\\\n .format(tempfile.name, prepared_statements)\n self.run_cqlsh(cmds=cmds)\n\n out, err, _ = self.run_cqlsh(cmds=\"SELECT * FROM ks.test_many_empty_strings\")\n res = self.parse_cqlsh_query(out=out, num_cols=9)\n\n self.assertCsvResultEqual(tempfile.name, res, 'test_many_empty_strings')\n\n _test(True)\n _test(False)", "def discardBlanks (texts, scores):\n\tnew_texts = []\n\tnew_scores = []\n\tfor i,text in enumerate(texts):\n\t\tif text != '' or text != ' ':\n\t\t\tnew_texts.append(text)\n\t\t\tnew_scores.append(scores[i])\n\treturn new_texts,new_scores", "def clean_arrangement(arrangement):\n arrangement = [a.strip(\" \") for a in arrangement.split(\",\")]\n return arrangement", "def separate_comma(s):\n return s.split(',')", "def _strip_leading_comma(descr):\n if len(descr) > 0 and descr.strip()[0] == \",\":\n descr = descr.strip()[1:]\n return descr.strip()", "def _standardize_str(s):\n memory = []\n s = s.replace(\",\", \" \")\n res = \"\"\n for c in s:\n if c != \" \":\n res += c\n memory = []\n elif not memory:\n res += c\n memory.append(\" \")\n return res", "def get_str_arrays(self):\n return self._fin.readline().strip('\\n').strip(' ').split(' ')", "def test_read_line(self):\n\n expected_data = ['\\\"lu, jr\\\"','ming-yuan','\\\"DRUG,1\\\"',135.999,True,3]\n input_string = '001,\\\"LU, JR\\\",MING-YUAN,\\\"DRUG,1\\\",135.999\\n'\n data = read_line(input_string)\n self.assertEqual(expected_data[0],data[0])\n self.assertEqual(expected_data[1],data[1])\n self.assertEqual(expected_data[2],data[2])\n self.assertAlmostEqual(expected_data[3],data[3])\n self.assertEqual(expected_data[4],data[4])\n self.assertAlmostEqual(expected_data[5],data[5])\n\n #Check for odd numers of quotation marks\n input_string = '001,\\\"LU\\\",\\\"MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for missing fields\n input_string = '001,,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for corrupted fields\n input_string = '001x,LU,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,1ag5\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])", "def clean_lines(lines):\n _lines = []\n for l in lines:\n l = l.strip().rstrip()\n if len(l) > 0:\n _lines.append(l)\n return _lines", "def read_names(file_name):\n\twith open(file_name, 'r') as f:\n\t\tnames_raw = f.read()\n\tnames_raw = names_raw.split(',')\n\tnames_stripped = []\n\n\tfor name in names_raw:\n\t\tnames_stripped.append(name.strip('\\\"'))\n\tprint(names_stripped)\n\treturn names_stripped", "def _split_input_list(str_list):\r\n\r\n new_list = re.split(r'[\\n\\r\\s,]', str_list)\r\n new_list = [s.strip() for s in new_list]\r\n new_list = [s for s in new_list if s != '']\r\n\r\n return new_list", "def 
readStringList( Text, ItemSeparator = ';' ):\n ValuesList = []\n try:\n if Text.find(ItemSeparator) >= 0: \n ValuesList = Text.strip().split(ItemSeparator)\n except:\n pass\n return ValuesList" ]
[ "0.6063498", "0.59767", "0.5961129", "0.58939993", "0.5801716", "0.56955117", "0.56331944", "0.5605717", "0.5415791", "0.53999573", "0.53361046", "0.53183955", "0.5304728", "0.5300823", "0.52928203", "0.52311575", "0.522873", "0.5206089", "0.5194998", "0.5175699", "0.5159555", "0.51487625", "0.51428246", "0.51387495", "0.5130816", "0.5130376", "0.5101209", "0.5083616", "0.5072691", "0.50715953" ]
0.617661
0
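The document above calls a remove_border_spaces helper that the row does not include; the sketch below uses a plain str.strip stand-in (an assumption about the helper's behavior) so it runs on its own:

def remove_border_spaces(name):
    # stand-in for the helper referenced by the document: strips leading and trailing whitespace
    return name.strip()

def read_csv_names(string):
    return [remove_border_spaces(name) for name in string.split(',')]

print(read_csv_names("alice ,  bob,charlie "))   # -> ['alice', 'bob', 'charlie']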
Create two 1D Gaussian kernels, taking in the same kernel size and sigma values such that sigma_y = 3 * sigma_x, and form a 2D kernel from them.
def MVgaussian(size, mu1=0, mu2=0, sigma1=3, sigma2=1):
    kernel = np.zeros((size, size), dtype=np.float32)
    size = int(size) // 2
    X = np.arange(-size, size + 1)
    Y = np.arange(-size, size + 1)
    for x in X:
        for y in Y:
            Gx = np.exp(-((x - mu1) ** 2) / (2 * (sigma1 ** 2)))
            Gy = np.exp(-((y - mu2) ** 2) / (2 * (sigma2 ** 2)))
            Gx = math.exp(-(math.pow(x - mu1, 2)) / (2 * math.pow(sigma1, 2)))
            Gy = math.exp(-(math.pow(y - mu2, 2)) / (2 * math.pow(sigma2, 2)))
            kernel[x + size, y + size] = Gx * Gy
    return kernel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return kernel", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def cal_gaussian_process(b, sigma2, X_train, y_train, X_test):\n n = X_train.shape[0]\n p = X_test.shape[0]\n\n K_n = np.array([[kernel(X_train[i], X_train[j], b) for i in range(n)] for j in range(n)])\n inv = np.linalg.inv(np.diag([sigma2] * n) + K_n)\n miu = np.zeros(p)\n Sigma = np.zeros(p)\n \n for j in range(p): # for every new point x0 in testing data.\n x0 = X_test[j]\n K_Dn = np.zeros(n) # initialize K_Dn \n for i in range(n):\n K_Dn[i] = kernel(X_train[i], x0, b) # calculate every item in K_Dn\n \n miu[j] = K_Dn.dot(inv).dot(y_train)[0] # calculate new distribution parameters\n Sigma[j] = sigma2 + kernel(x0, x0, b) - K_Dn.dot(inv).dot(K_Dn.T)\n \n return miu, Sigma", "def gaussian_kernel(shape: Tuple[int, int]=(3, 3), sigma: float=0.5):\n m, n = [int((ss - 1.) / 2.) for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n kernel = np.exp(-(x * x + y * y) / (2. 
* sigma * sigma))\n kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0\n sumh = kernel.sum()\n if sumh != 0:\n kernel /= sumh\n return kernel", "def gaussian_kernel(dim, sigma):\n kernel = np.zeros(dim)\n\n if dim%2 == 0:\n begin = dim//2-1\n else:\n begin = dim//2\n\n for i in range(dim):\n kernel[i] = gaussian(i-begin, sigma)\n\n return kernel", "def gauss_kernels(size, sigma=1.0):\n if size < 3:\n size = 3\n\n m = size / 2\n x, y = np.mgrid[-m:m + 1, -m:m + 1]\n kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n kernel_sum = kernel.sum()\n\n if not sum == 0:\n kernel = kernel / kernel_sum\n\n return kernel", "def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B", "def gaussian_k(x0, y0, sigma, height, width):\n y = np.arange(0, width, 1, float)\n x = np.arange(0, height, 1, float)[:, np.newaxis]\n return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))", "def gaussian_kernel_2d(mean, std_inv, size):\n if type(mean) is torch.Tensor:\n device = mean.device\n elif type(std_inv) is torch.Tensor:\n device = std_inv.device\n else:\n device = \"cpu\"\n\n # repeat the size for width, height if single number\n if isinstance(size, numbers.Number):\n width = height = size\n else:\n width, height = size\n\n # expand std to (2, 2) matrix\n if isinstance(std_inv, numbers.Number):\n std_inv = torch.tensor([[std_inv, 0], [0, std_inv]], device=device)\n elif std_inv.dim() == 0:\n std_inv = torch.diag(std_inv.repeat(2))\n elif std_inv.dim() == 1:\n assert len(std_inv) == 2\n std_inv = torch.diag(std_inv)\n\n # Enforce PSD of covariance matrix\n covariance_inv = std_inv.transpose(0, 1) @ std_inv\n covariance_inv = covariance_inv.float()\n\n # make a grid (width, height, 2)\n X = torch.cat(\n [\n t.unsqueeze(-1)\n for t in reversed(\n torch.meshgrid(\n [torch.arange(s, device=device) for s in [width, height]]\n )\n )\n ],\n dim=-1,\n )\n X = X.float()\n\n # center the gaussian in (0, 0) and then shift to mean\n X -= torch.tensor([(width - 1) / 2, (height - 1) / 2], device=device).float()\n X -= mean.float()\n\n # does not use the normalize constant of gaussian distribution\n Y = torch.exp((-1 / 2) * torch.einsum(\"xyi,ij,xyj->xy\", [X, covariance_inv, X]))\n\n # normalize\n # TODO could compute the correct normalization (1/2pi det ...)\n # and send warning if there is a significant diff\n # -> part of the gaussian is outside the kernel\n Z = Y / Y.sum()\n return Z", "def _generate_gaussian_kernel(self, size: int, sigma: float = 1.0, mu: float = 0.0) -> ndarray:\n # create the 1D array of equally spaced distance point of given size\n self.kernel_1d = np.linspace(-(size//2), size//2, size)\n # get the gaussian distribution of the 1D array\n self.kernel_1d = self._gaussian_distribution(\n self.kernel_1d, mu, sigma)\n\n # Compute the outer product of kernel1D tranpose and kernel1D\n self.kernel_2d = np.outer(self.kernel_1d.T, self.kernel_1d)\n # normalize the the outer product to suish the values between 0.0-1.0\n self.kernel_2d *= 1.0/self.kernel_2d.max()\n return self.kernel_2d", "def gaussian_kernel(N, mu, sigma):\n # Asserting N is odd and sigma is number\n assert assert_odd(N)\n \n # Create the normal here (with ID covariance) \n normal = multivariate_normal(mean=mu, cov=sigma*np.identity(2))\n \n # Create the position matries (x_1,x_2 in 2D)\n X_1 = np.ones((N,N))*np.arange(N) # x_1 pos\n X_2 = X_1.T #x_2 
pos, just transpose the above\n \n # Shift the positions so center is at middle\n s = np.floor(N/2) #shift value\n X_1, X_2 = X_1-s, X_2-s # shifted matrices\n \n # Create holder matrix\n X = np.zeros((N,N)) # Below we have the iterator \n for (i,j) in [(i,j) for i in range(N) for j in range(N)]:\n X[i,j] = normal.pdf([X_1[i,j], X_2[i,j]]) # Normal values\n \n # Finally just return the normalized kernel\n return X*(1/np.sum(X))", "def gaussianKernel(size, sigma=1):\n\n colourers.info(f'Creating gaussian kernel of size {size} with sigma of {sigma}')\n size = int(size) // 2\n x, y = np.mgrid[-size:size+1, -size:size+1]\n normal = 1 / (2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0 * sigma ** 2))) * normal\n return g", "def gkern2(kernlen=21, nsig=3):\n # create nxn zeros\n inp = np.zeros((kernlen, kernlen))\n # set element at the middle to one, a dirac delta\n inp[kernlen//2, kernlen//2] = 1\n # gaussian-smooth the dirac, resulting in a gaussian filter mask\n kernel = scipy.ndimage.filters.gaussian_filter(inp, nsig)\n\n return kernel", "def generate_gaussian_kernel(shape=(3,3),sigma=0.8):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):\n num_total_points = tf.shape(xdata)[1]\n\n # Expand and take the difference\n xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]\n xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]\n diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]\n\n # [B, y_size, num_total_points, num_total_points, x_size]\n if self._kernel == 'PER':\n norm = 2*tf.square(tf.math.sin(3.14*diff[:, None, :, :, :])) / l1[:, :, None, None, :]\n norm = tf.reduce_sum(norm, -1) # [B, data_size, num_total_points, num_total_points]\n # [B, y_size, num_total_points, num_total_points]\n kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-norm)\n\n else: # if kernel is normal gaussian\n norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])\n norm = tf.reduce_sum(norm, -1) # [B, data_size, num_total_points, num_total_points]\n # [B, y_size, num_total_points, num_total_points]\n kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5*norm)\n\n # Add some noise to the diagonal to make the cholesky work.\n kernel += (sigma_noise**2) * tf.eye(num_total_points)\n\n return kernel", "def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):\n num_total_points = tf.shape(xdata)[1]\n\n # Expand and take the difference\n # [B, 1, num_total_points, x_size]\n xdata1 = tf.expand_dims(xdata, axis=1)\n # [B, num_total_points, 1, x_size]\n xdata2 = tf.expand_dims(xdata, axis=2)\n # [B, num_total_points, num_total_points, x_size]\n diff = xdata1 - xdata2\n\n # [B, y_size, num_total_points, num_total_points, x_size]\n norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])\n\n norm = tf.reduce_sum(\n norm, -1) # [B, data_size, num_total_points, num_total_points]\n\n # [B, y_size, num_total_points, num_total_points]\n kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5 * norm)\n\n # Add some noise to the diagonal to make the cholesky work.\n kernel += (sigma_noise**2) * tf.eye(num_total_points)\n\n return kernel", "def gaussian_kernel(x1, x2, gamma=1):\n return torch.exp(-gamma * torch.norm(x1-x2)**2)", "def NN_kernel(x1, x2, kernel_params):\n sigma_b = 
kernel_params['sigma_b']\n sigma_w = kernel_params['sigma_w']\n\n x1 = np.array([x1])\n x2 = np.array([x2])\n x2 = x2.T\n\n K12 = sigma_b**2 + (sigma_w**2)*x1*x2\n K11 = sigma_b**2 + (sigma_w**2)*x1*x1\n K22 = sigma_b**2 + (sigma_w**2)*x2*x2\n theta = np.arccos(K12/np.sqrt(np.multiply(K11, K22)))\n\n return sigma_b**2 + (sigma_w**2/(2*np.pi))*np.sqrt(np.multiply(K11, K22))*(np.sin(theta) + np.multiply((np.pi - theta), np.cos(theta)))", "def gaussian_kernel(kernel_size: (int, tuple, list), width: float):\n\n kernel_size = np.asarray(to_list(kernel_size, 2), np.float)\n half_ksize = (kernel_size - 1) / 2.0\n x, y = np.mgrid[-half_ksize[0]:half_ksize[0] + 1,\n -half_ksize[1]:half_ksize[1] + 1]\n kernel = np.exp(-(x ** 2 + y ** 2) / (2 * width ** 2))\n return kernel / (kernel.sum() + 1e-8)", "def __guassian_kernel(x, sigma=200):\n return (1 / (sqrt(2.*pi) * sigma)) * exp(-x ** 2 / (2.*sigma**2))", "def GaussianKernel(radius, std):\n size = 2 * radius + 1\n weight = torch.ones(size, size)\n weight.requires_grad = False\n for i in range(-radius, radius+1):\n for j in range(-radius, radius+1):\n dis = (i * i) + (j * j)\n weight[i+radius][j+radius] = np.exp(-dis / (2 * std * std))\n weight = weight / weight.sum()\n return weight", "def _gaussian_kernel(kernel_size):\n curr_kernel = _binoms(kernel_size)\n curr_kernel = curr_kernel.reshape(kernel_size, 1)\n kernel2d = convolve2d(curr_kernel.transpose(), curr_kernel)\n kernel2d = np.divide(kernel2d, np.sum(kernel2d))\n return kernel2d", "def compute_kernel_matrix(x,y,sigma):\n m = len(x)\n\n s = np.zeros((m,m))\n for i in range(len(x)):\n for j in range(i+1):\n s[i,j] = np.exp(-((x[i]-y[j])**2)/(2*sigma**2))\n for i in range(2,m):\n for j in range(0,i):\n s[i,j] = s[j,i]\n return s", "def gaussian_kernel(kernel_size: (int, tuple, list), width: float):\n kernel_size = np.asarray(to_list(kernel_size, 2), np.float)\n half_ksize = (kernel_size - 1) / 2.0\n x, y = np.mgrid[-half_ksize[0]:half_ksize[0] + 1, -half_ksize[1]:half_ksize[1] + 1]\n kernel = np.exp(-(x ** 2 + y ** 2) / (2 * width ** 2))\n return kernel / (kernel.sum() + 1e-08)", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def GaussianKernel(shape=(3, 3), sigma=0.5):\r\n radius_x, radius_y = [(radius-1.)/2. for radius in shape]\r\n y_range, x_range = np.ogrid[-radius_y:radius_y+1, -radius_x:radius_x+1]\r\n h = np.exp(- (x_range*x_range + y_range*y_range) / (2.*sigma*sigma))\r\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\r\n sumofh = h.sum()\r\n if sumofh != 0:\r\n h /= sumofh\r\n return h", "def gaussian_kernel(size, sigma):\n\n m, n = [(s - 1.) / 2. for s in size]\n y, x = np.ogrid[-m:m+1, -n:n+1]\n h = np.exp(-(x*x + y*y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\n sumh = h.sum()\n if sumh != 0: h /= sumh\n return h", "def gaussian2d(l, sigma=1.0):\n\n ax = np.arange(-l//2 + 1.0, l//2 + 1.0)\n xx, yy = np.meshgrid(ax, ax)\n\n kernel = (1.0 / math.sqrt(2.0 * math.pi * sigma**2)) * np.exp(-(xx**2 + yy**2)/(2.0*sigma**2))\n\n return np.asarray(kernel, dtype=np.float32)" ]
[ "0.7332826", "0.727613", "0.72481734", "0.7187981", "0.7060818", "0.7011949", "0.6993787", "0.69755137", "0.67996466", "0.67077035", "0.6697541", "0.6688304", "0.66812515", "0.6680533", "0.66462684", "0.66387516", "0.6636602", "0.65469486", "0.6504632", "0.647639", "0.64753", "0.64450026", "0.6424776", "0.6418675", "0.6416442", "0.64102334", "0.64066005", "0.6381584", "0.63811755", "0.63408124" ]
0.73550737
0
DoG (Difference of Gaussian) Filter is generated by convolving a Sobel Kernel with a Gaussian Kernel of a given size, orientation and scale, i.e. obtaining the first derivative of the Gaussian Kernel. A DoG Filter Bank is a set of DoG filters generated by obtaining the first derivatives of Gaussian kernels at various orientations and scales for a given size.
def OrientedDoG(size=7,scales=[1,2],n_orientations=8): filt_count = 0 # declare the sobel kernel sobel_kernel = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32) # pre define a dummy filter bank n_filters = len(scales)*n_orientations filterBank = np.zeros((size,size,n_filters), dtype=np.float32) # for a given scale for s in scales: # generate gaussian kernel gaussian_kernel = Gaussiankernel(size, sigma=s) # and obtain first derivative of the gaussian kernel by convolving DoG = cv2.filter2D(gaussian_kernel, -1, sobel_kernel) # split orientations degreeRotation = 360.0/n_orientations for o in range(n_orientations): angle = o*degreeRotation filterBank[:,:,filt_count] = imutils.rotate(DoG,angle) filt_count+=1 if filt_count == n_filters: return filterBank else: print('Error in generating Bank') return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_DOGs(inner_sigma, x, shape):\n DOG = make_DOG(inner_sigma, x)\n result = np.zeros((shape[0]*shape[1], x.size**2))\n for i in range(shape[0]): \n for j in range(shape[1]): \n k = shift_kernel(DOG, shape, (i,j))\n result[i+shape[0]*j,:] = k.flatten()\n \n return result", "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def apply_gaussian_resolution(self,params,data,fwhm=1,dE=0.01,E_max=100):\n print('\\n################### CONVOLUTION #####################\\n')\n print(f'\\n\\tConvolution with Gaussian function, FWHM = {fwhm} meV\\n')\n\n data.fwhm = fwhm\n c = fwhm/2.35482\n\n data.dE = dE\n data.E_max = E_max\n data.spectra_E = np.arange(0,data.E_max+data.dE,data.dE)\n data.spectra_num_E = len(data.spectra_E)\n data.spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n data.smooth_spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n structure_factors = []\n energies = []\n\n ### sum intensity of degenerate bands\n if params.sum_degenerate_bands == True:\n print('\\n\\tSumming degenerate bands before convolution (using convolution dE as tolerance)\\n')\n for q in range(params.num_Qpoints):\n sfac = data.structure_factors[:,q]\n energy = data.frequencies[f'{q}']\n reduced_energies = []\n summed_sfac = []\n while True:\n if len(energy) == 0:\n break\n test_energy = energy[0]\n reduced_energies.append(test_energy)\n indicies = np.intersect1d(np.argwhere(energy <= (test_energy+data.dE)),\n np.argwhere(energy > (test_energy-data.dE)))\n summed_sfac.append(sfac[indicies].sum())\n sfac = np.delete(sfac,indicies)\n energy = np.delete(energy,indicies)\n energies.append(reduced_energies)\n structure_factors.append(summed_sfac)\n else:\n print('\\n\\tWARNING: You should definitely sum degenerate bands!!!\\n')\n for q in range(params.num_Qpoints):\n energies.append(data.frequencies[f'{q}'])\n structure_factors.append(data.structure_factors[:,q])\n\n ### populate array for heatmap\n ### try statement takes care of negative energies\n for q in range(params.num_Qpoints):\n for b in range(len(structure_factors[q][:])):\n try: # if there are negative modes, argwhere returns an empty vector and the slice crashes\n data.spectra[np.argwhere(data.spectra_E <= \n energies[q][b]).max(),q] = structure_factors[q][b]\n except:\n continue\n\n if params.bose_factor == True:\n print('\\n\\tWARNING: Bose factor isnt verified. 
Need to compare to SNAXS.\\n')\n if params.temperature < 5:\n temperature = 5\n else:\n temperature = params.temperature\n inds = np.argwhere(data.spectra_E <= 0.5)\n tmp_e = np.copy(data.spectra_E)\n tmp_e[inds] = 0.5\n bose = 1+1/(np.exp(tmp_e/(constants.kb*1000*temperature))-1)\n bose = np.tile(bose.reshape((data.spectra_num_E,1)),reps=(1,params.num_Qpoints))\n data.spectra = np.multiply(data.spectra,bose)\n data.spectra = data.spectra/np.max(data.spectra)\n\n ### gaussian convolution using for loops, slow but very little memory utilization\n g_energy = np.append(data.spectra_E-data.spectra_E.max(),data.spectra_E[1:])\n gaussian = np.exp(-0.5*g_energy**2/c**2)/c/np.sqrt(2*np.pi)\n gaussian = np.tile(gaussian.reshape((gaussian.shape[0],1)),(1,data.num_Qpoints))\n tmp = np.append(data.spectra,data.spectra,axis=0)[1:,:]\n for e in range(data.spectra_num_E):\n if e%50 == 0:\n print(f'\\t------ {e}/{data.spectra_num_E} -------')\n data.smooth_spectra[e,:] = np.trapz(tmp*np.roll(gaussian,shift=e,axis=0),g_energy,axis=0)\n print('\\n\\tDone convolving!\\n')\n data.smooth_spectra = data.smooth_spectra/np.max(data.smooth_spectra)\n\n# if params.random_background == True:\n# data.smooth_spectra = data.smooth_spectra+(np.random.normal(0,1,\n# (data.smooth_spectra.shape[0],data.smooth_spectra.shape[1])))*0.001\n \n plt.imshow(data.smooth_spectra,origin='lower',aspect='auto',cmap='hot')\n plt.show()", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def gaussian_filter(size,sigma=-1):\n\n if sigma == -1:\n sigma = np.sqrt(size)\n\n filter = np.zeros((size,size))\n\n for i,j in it.product(range(size),range(size)):\n x = j-size//2\n y = i-size//2\n filter[i,j] = 1/(2*np.pi*sigma**2) * np.exp(-(x**2+y**2)/(2*sigma**2))\n\n filter = filter/filter[0,0]\n filter = filter/filter.sum()\n\n return filter", "def make_DOG(inner_sigma, x):\n y = x\n outer_sigma = inner_sigma*5\n X, Y = np.meshgrid(x, y)\n inner_gaussian = 1./(2.*np.pi*inner_sigma) * np.exp(-(X**2 + Y**2)/2./inner_sigma**2) \n outer_gaussian = 1./(2.*np.pi*outer_sigma) * np.exp(-(X**2 + Y**2)/2./outer_sigma**2) \n return inner_gaussian - outer_gaussian/2 #weaker surround works better with our weights, which don't account for bursts ", "def gaussianBlur(img,ksize=(5,5),sigma=10):\n #kernel = cv2.getGaussianKernel(ksize,sigma)\n dst = np.zeros_like(img)\n cv2.GaussianBlur(src=img,dst=dst,ksize=ksize,sigmaX=0)\n return dst", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return kernel", "def gauss_derivative_kernels(size, size_y=None):\n size = int(size)\n if not 
size_y:\n size_y = size\n else:\n size_y = int(size_y)\n y, x = mgrid[-size: size + 1, -size_y: size_y + 1]\n\n # x and y derivatives of a 2D gaussian with standard dev half of size\n # (ignore scale factor)\n gx = - x * exp(-(x ** 2 / float((0.5 * size) ** 2) + y ** 2 / float((0.5 * size_y) ** 2)))\n gy = - y * exp(-(x ** 2 / float((0.5 * size) ** 2) + y ** 2 / float((0.5 * size_y) ** 2)))\n\n return gx, gy", "def gaborFilter(img, ksize=31):\n filters = []\n #ksize = 31\n for theta in np.arange(0, np.pi, np.pi / 16):\n kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)\n kern /= 1.5*kern.sum()\n filters.append(kern)\n accum = np.zeros_like(img)\n for ker in filters:\n fimg = cv2.filter2D(img, cv2.CV_8UC3, ker)\n np.maximum(accum, fimg, accum)\n return accum", "def create_gaussian_filter(size, sigma):\n h = size[0] #height of the template\n w = size[1] #width of the template \n if h % 2 == 0: h += 1 #add 1 if dimensions are even\n if w % 2 == 0: w += 1\n x = math.floor(h/2)\n y = math.floor(w/2) \n sum = 0\n #create our template\n template = np.zeros((h,w))\n #fill the template in with the numbers from Gaussian distribution\n for i in range(h):\n for j in range(w):\n template[i,j] = math.exp(-((((j-x)**2)+((i-y)**2))/(2*(sigma**2))))\n sum = sum + template[i,j]\n #normalise the numbers\n gaussian_filter = template/sum\n return gaussian_filter", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def _gaussian_kernel(kernel_size):\n curr_kernel = _binoms(kernel_size)\n curr_kernel = curr_kernel.reshape(kernel_size, 1)\n kernel2d = convolve2d(curr_kernel.transpose(), curr_kernel)\n kernel2d = np.divide(kernel2d, np.sum(kernel2d))\n return kernel2d", "def gaus_kernel_calc(kernel_size):\n base_gaus_binom = np.array([[1], [1]])\n kernel = base_gaus_binom\n\n if kernel_size == 1:\n # If the kernel size is 1 we need a 2d array that keeps the image the same.\n kernel = np.array([[1]])\n kernel = scipy.signal.convolve2d(kernel, kernel.transpose())\n return kernel\n\n for i in range(kernel_size - 2):\n kernel = scipy.signal.convolve2d(kernel, base_gaus_binom)\n\n kernel = scipy.signal.convolve2d(kernel, kernel.transpose())\n return kernel/kernel.sum()", "def __gaussian_blur(self, img, kernel_size=3):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blurring(self,input_image,kernel_size,sigma):\n #Applying Gaussian Blur filter\n output_image=cv2.GaussianBlur(input_image,kernel_size,sigma)\n return output_image", "def DoG(imp0, kernel1, kernel2):\n imp1 = imp0.duplicate()\n imp2 = imp0.duplicate()\n IJ.run(imp1, \"Gaussian Blur...\", \"sigma=\" + str(kernel1) + \" stack\")\n IJ.run(imp2, \"Gaussian Blur...\", \"sigma=\"+ str(kernel2) + \" stack\")\n ic = ImageCalculator()\n imp3 = ic.run(\"Subtract create stack\", imp1, imp2)\n return imp3", "def dog_filter(stack, sigma_big, sigma_small):\n stack_cp = stack.astype(np.int16)\n return ndi.filters.gaussian_filter(stack_cp, sigma=sigma_big) - ndi.filters.gaussian_filter(stack_cp, sigma=sigma_small)", "def gaussian_blur(device, img, ksize, sigmax=0, 
sigmay=None, debug=None):\n\n img_gblur = cv2.GaussianBlur(img, ksize, sigmax, sigmay)\n\n device += 1\n if debug == 'print':\n print_image(img_gblur, (str(device) + '_gaussian_blur.png'))\n elif debug == 'plot':\n if len(img_gblur) == 3:\n plot_image(img_gblur)\n else:\n plot_image(img_gblur, cmap='gray')\n\n return device, img_gblur", "def createDefaultFilterbank(window):\n # Gaussians:: G1 = N(0, 1), G2 = N(0, 2), G3 = N(0, 4)\n # Laplacian of Gaussians:: LoG1 = Lap(N(0, 1)), LoG2=Lap(N(0, 2)), LoG3=Lap(N(0, 4)), LoG4=Lap(N(0, 8))\n # Derivative of Gaussian (x):: Div1xG1 = d/dx N(0,2), Div1xG2=d/dx N(0,4)\n # Derivative of Gaussian (y): Div1yG1 = d/dy N(0,2), Div1yG2=d/dy N(0,4)\n \n G1 = gaussian_kernel(window, window, 1)\n G2 = gaussian_kernel(window, window, 2)\n G3 = gaussian_kernel(window, window, 4)\n \n # see http://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm\n LoG1 = laplacianOfGaussian_kernel(window, window, 1)\n LoG2 = laplacianOfGaussian_kernel(window, window, 2)\n LoG3 = laplacianOfGaussian_kernel(window, window, 4)\n LoG4 = laplacianOfGaussian_kernel(window, window, 8)\n \n dx_G1 = gaussian_1xDerivative_kernel(window, window, 2)\n dx_G2 = gaussian_1xDerivative_kernel(window, window, 4)\n \n dy_G1 = gaussian_1yDerivative_kernel(window, window, 2)\n dy_G2 = gaussian_1yDerivative_kernel(window, window, 4)\n \n return np.array([G1, G2, G3, LoG1, LoG2, LoG3, LoG4, dx_G1, dx_G2, dy_G1, dy_G2])", "def gaussian_filter(stddev, array):\n\n return astropy.convolution.convolve(\n array, astropy.convolution.Gaussian2DKernel(stddev))", "def gaussian_1yDerivative_kernel(windowX, windowY, sigma):\n # See [http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/low/edges/canny.htm]\n X, Y = createKernalWindowRanges(windowX, windowY, increment)\n \n g_dy_kernel = gaussianFirstDerivative(Y, 0, sigma) * gaussianNormalised(X, 0, sigma)\n gSum = np.sum(np.abs(g_dy_kernel))\n \n if gSum == 0:\n print \"Warning dy_g_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g_dy_kernel)\n else:\n return (g_dy_kernel / gSum)", "def isotropic_Gaussian(ksize=15, l=6):\n\n V = np.array([[1, 0], [0, -1]])\n D = np.array([[l, 0], [0, l]])\n Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))\n k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)\n\n return k", "def gs_blur(self,k,img):\n SIG = self.sigma\n sig = [SIG,k*SIG,k*k*SIG,k*k*k*SIG,k*k*k*k*SIG]\n gsArray = [0,1,2,3,4]\n scaleImages = [0,1,2,3,4]\n \n for i in range(5):\n gsArray[i] = scipy.ndimage.filters.gaussian_filter(img,sig[i])\n\n return gsArray", "def gabor2d(sigma, wfreq, worient, wphase, size, normalize=True):\n gabor = ig.Gabor(frequency=wfreq * size, xdensity=size, ydensity=size, size=2 * sigma / size,\n orientation=worient, phase=wphase)()\n assert gabor.ndim == 2\n if normalize:\n gabor -= gabor.mean()\n # norm(x) when x is 1d or 2d are the same.\n gabor /= norm(gabor)\n\n return gabor", "def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n #print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in 
range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.", "def create_filter_bank():\r\n kernels = []\r\n for theta in range(0, 2):\r\n theta = theta / 2. * np.pi\r\n for sigma in (3, 5):\r\n for frequency in (0.10, 0.25):\r\n kernel = np.real(gabor_kernel(frequency, theta=theta,\r\n sigma_x=sigma, sigma_y=sigma))\r\n kernels.append(kernel)\r\n print(len(kernels))\r\n return kernels", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)" ]
[ "0.6555714", "0.6427121", "0.6140465", "0.6132629", "0.6105337", "0.60346574", "0.59940916", "0.59735537", "0.595391", "0.595101", "0.5937852", "0.5931722", "0.5903503", "0.58702475", "0.5850902", "0.58302706", "0.57934594", "0.57778287", "0.57675123", "0.5729597", "0.57154083", "0.5697014", "0.56867063", "0.56682044", "0.56670266", "0.5663377", "0.56478477", "0.5634798", "0.5633997", "0.5633997" ]
0.6729979
0
Creates a partial role from the given `role_id`. If the role already exists, returns that instead.
def create_partial_role_from_id(role_id, guild_id = 0): try: return ROLES[role_id] except KeyError: pass role = Role._create_empty(role_id, guild_id) ROLES[role_id] = role return role
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover", "def get_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover", "def get_role(self, role_id: int, /) -> Optional[Role]:\n return self.guild.get_role(role_id) if self._roles.has(role_id) else None", "def get_role_by_id(self, role_id):\n try:\n role = self.db_handler.get_role_by_id(role_id)\n\n self.logger.write_to_log('got role by id', 'model')\n return role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def get_role(role_id: int) -> Optional[Role]:\n return db.session.query(Role).get(role_id)", "def create_role_response(request, role_id):\n role_resource = {\n \"id\": role_id,\n \"name\": request.json.get(\"name\"),\n \"owners\": request.json.get(\"owners\"),\n \"administrators\": request.json.get(\"administrators\"),\n \"members\": [],\n \"tasks\": [],\n \"proposals\": [],\n \"description\": request.json.get(\"description\"),\n }\n\n if request.json.get(\"metadata\"):\n role_resource[\"metadata\"] = request.json.get(\"metadata\")\n\n return json({\"data\": role_resource})", "def role_write(self, fail_on_found=False, disassociate=False, **kwargs):\n\n # Get the role, using only the resource data\n data, self.endpoint = self.data_endpoint(kwargs, ignore=['obj'])\n debug.log('Checking if role exists.', header='details')\n response = self.read(pk=None, fail_on_no_results=True,\n fail_on_multiple_results=True, **data)\n role_data = response['results'][0]\n role_id = role_data['id']\n\n # Role exists, change display settings to output something\n self.configure_display(role_data, kwargs, write=True)\n\n # Check if user/team has this role\n # Implictly, force_on_exists is false for roles\n obj, obj_type, res, res_type = self.obj_res(kwargs)\n debug.log('Checking if %s already has role.' % obj_type,\n header='details')\n data, self.endpoint = self.data_endpoint(kwargs)\n response = self.read(pk=None, fail_on_no_results=False,\n fail_on_multiple_results=False, **data)\n\n msg = ''\n if response['count'] > 0 and not disassociate:\n msg = 'This %s is already a member of the role.' % obj_type\n elif response['count'] == 0 and disassociate:\n msg = 'This %s is already a non-member of the role.' % obj_type\n\n if msg:\n role_data['changed'] = False\n if fail_on_found:\n raise exc.NotFound(msg)\n else:\n debug.log(msg, header='DECISION')\n return role_data\n\n # Add or remove the user/team to the role\n debug.log('Attempting to %s the %s in this role.' 
% (\n 'remove' if disassociate else 'add', obj_type), header='details')\n post_data = {'id': role_id}\n if disassociate:\n post_data['disassociate'] = True\n client.post('%s/%s/roles/' % (self.pluralize(obj_type), obj),\n data=post_data)\n role_data['changed'] = True\n return role_data", "def set_keystone_v3_role(self, role_id, role_new_name):\n LOG_OBJ.debug(\"Creating the role.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/roles/\" + str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _role_info = {\"role\": {\n \"name\": role_new_name}}\n _body = json.dumps(_role_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the role\")\n print (\"No response from Server while set the role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "async def role_from_id(self, guild: discord.Guild, role_id: int):\n\n return discord.utils.get(guild.roles, id=role_id)", "def start_router_realm_role(self, id, role_id, config, details=None):\n self.log.debug(\"{}.add_router_realm_role\".format(self.__class__.__name__),\n id=id, role_id=role_id, config=config)\n\n if id not in self.realms:\n raise ApplicationError(u\"crossbar.error.no_such_object\", \"No realm with ID '{}'\".format(id))\n\n if role_id in self.realms[id].roles:\n raise ApplicationError(u\"crossbar.error.already_exists\", \"A role with ID '{}' already exists in realm with ID '{}'\".format(role_id, id))\n\n self.realms[id].roles[role_id] = RouterRealmRole(role_id, config)\n\n realm = self.realms[id].config['name']\n self._router_factory.add_role(realm, config)", "def update_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover", "async def fetch(cls, id: Union[str, int]) -> Optional[\"Role\"]:\n query = \"\"\"SELECT * FROM roles WHERE id = $1;\"\"\"\n role = await cls.pool.fetchrow(query, int(id))\n\n if role is not None:\n role = cls(**role)\n\n return role", "def add_role(self, role_id: str, current_user_id=None):\n if RoleModel.is_valid_role(role_id) and not self.has_role(role_id):\n user_role = UserRoleModel(user_id=self.id, role_id=role_id, lastchange_by=current_user_id)\n self.roles.append(user_role)", "def create_role(self, **kwargs):\n role = self.role_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(role)", "def put(self, id):\n data = request.json\n role = Role.query.filter(Role.id == id).one()\n if 'description' in data:\n role.description = data.get('description')\n if 'name' in data:\n role.name = data.get('name')\n db.session.add(role)\n db.session.commit()\n return None, 204", "async def add_role_member(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().member.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n pack_id=request.json.get(\"pack_id\"),\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n batch_status = await utils.send(\n 
request.app.config.VAL_CONN,\n batch_list,\n request.app.config.TIMEOUT,\n request.json.get(\"tracker\") and True,\n )\n if request.json.get(\"tracker\"):\n return utils.create_tracker_response(\"batch_status\", batch_status)\n return json({\"proposal_id\": proposal_id})", "def add_role(role):\n roleOfUser=Role.objects.create(type=role)\n return roleOfUser", "def role_assign(user_id, role_id):\n user = _get_user_or_404(user_id)\n role = _get_role_or_404(role_id)\n initiator_id = g.user.id\n\n authorization_service.assign_role_to_user(\n role.id, user.id, initiator_id=initiator_id\n )\n\n flash_success(\n gettext(\n '%(role_title)s has been assigned to \"%(screen_name)s\".',\n screen_name=user.screen_name,\n role_title=role.title,\n )\n )", "def create(self, role):\n model = models.load('Role', role)\n model.account_id = self.account_id\n\n return self.client.create_role(model)", "def add_employeeRole(self, id, role):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employeeRoles values(%s,%s)',\n (id, role))\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save EmployeeRole!\\n(%s)' % (error))", "def get(self, role_id):\n # Right now the only way is to list them all, then iterate.\n # Perhaps a filter or new endpoint would be useful here.\n roles = self.list()\n for role in roles:\n if role.id == role_id:\n return role\n raise exc.HTTPNotFound()", "def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})", "def get(self, role_id):\n return self.client.get_role(role_id)", "def create_role(self, **kwargs):\n\n role = self.role_model(**kwargs)\n return self.put(role)", "async def add_role_admin(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().admin.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n await utils.send(\n request.app.config.VAL_CONN, batch_list, request.app.config.TIMEOUT\n )\n return json({\"proposal_id\": proposal_id})", "def update_role(self, role_id, name: str) -> Role | None:\n role = self.get_session.get(self.role_model, role_id)\n if not role:\n return None\n try:\n role.name = name\n self.get_session.merge(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_UPD_ROLE.format(role))\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_UPD_ROLE.format(e))\n self.get_session.rollback()\n return None\n return role", "def edit_role(role_id, new_name=None, new_arn=None):\n\tsession = get_session()\n\told_data = get_role(role_id)\n\tdata = {}\n\tdata[\"name\"] = new_name or old_data[\"name\"]\n\tdata[\"arn\"] = new_arn or old_data[\"arn\"]\n\tresponse = session.put(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id), json=data)\n\treturn response.json()", "def addRole(self, role_id, title='', description=''):\n if self._roles.get(role_id) is not None:\n raise KeyError('Duplicate role: %s' % role_id)\n\n self._roles[role_id] = {'id': role_id, 'title': title,\n 'description': description}", 
"def role_build(collection=None, **id):\n return RoleFactory.build(\n **id,\n scopes=ScopeFactory.create_batch(randint(0, 3), type=choice(('odp', 'client'))),\n collection=collection or (collection := CollectionFactory() if randint(0, 1) else None),\n collection_id=collection.id if collection else None,\n )", "def get_role(role_id):\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id))\n\treturn response.json()" ]
[ "0.68688715", "0.6774872", "0.6424449", "0.61903214", "0.61115676", "0.60018206", "0.59815186", "0.5958131", "0.59569633", "0.5928142", "0.5925251", "0.5838952", "0.57899237", "0.5773943", "0.57660884", "0.5758392", "0.5740605", "0.5716096", "0.5712389", "0.5708068", "0.5691913", "0.5633791", "0.5631315", "0.5627075", "0.5611877", "0.56084526", "0.5597542", "0.5552838", "0.5549598", "0.55432224" ]
0.8589602
0
If the text is a role mention, returns the respective role if found.
def parse_role_mention(text): parsed = ROLE_MENTION_RP.fullmatch(text) if parsed is None: return role_id = int(parsed.group(1)) return ROLES.get(role_id, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_role(text, guild = None):\n parsed = ID_RP.fullmatch(text)\n if (parsed is not None):\n role_id = int(parsed.group(1))\n try:\n role = ROLES[role_id]\n except KeyError:\n pass\n else:\n return role\n \n role = parse_role_mention(text)\n if (role is not None):\n return role\n \n if (guild is not None):\n if (guild is not None):\n role = guild.get_role_like(text)\n if (role is not None):\n return role\n \n return None", "def token_role(self, role):\n return self.read('auth/token/roles/{0}'.format(role))", "async def pingrole(self, ctx, role: discord.Role, *, text):\n if role.mentionable:\n await ctx.send(inline('Error: role is already mentionable'))\n return\n\n try:\n await role.edit(mentionable=True)\n except Exception as ex:\n await ctx.send(inline('Error: failed to set role mentionable'))\n if ex.text == \"Missing Permissions\":\n message = await ctx.send(inline('Make sure this bot\\'s role is higher than the one you\\'re mentioning'))\n await asyncio.sleep(3)\n await message.delete()\n return\n\n await ctx.message.delete()\n await asyncio.sleep(1)\n await ctx.send('From {}:\\n{}\\n{}'.format(ctx.author.mention, role.mention, text))\n\n try:\n await role.edit(mentionable=False)\n except Exception as ex:\n await ctx.send(inline('Error: failed to set role unmentionable'))\n return", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]", "def get_role(ssm):\n nodes = ssm[\"nodes\"]\n for node in nodes:\n if node[\"type\"] == \"role\":\n return node[\"name\"]\n return \"no role\"", "def find_role(self, roleName):\n try:\n return self.role2index[roleName]\n except:\n raise KeyError(\n f\"The role {roleName} is not in the general list... check your input file!\")", "def get_role(guild: discord.Guild, role_id: int = None, role_name: str = None) -> Optional[discord.Role]:\n if guild is None:\n raise ValueError(\"guild is None\")\n if role_id is None and role_name is None:\n raise ValueError(\"Either role_id or role_name must be specified\")\n for role in guild.roles:\n if role.id == role_id or (role_name is not None and role.name.lower() == role_name.lower()):\n return role\n return None", "def extract_role(name):\n name = sanitize_name(name)\n if ' - prof' in name:\n return \"prof\"\n for l in [1,2,3,4,5,6,7,8,9]:\n for g in [\"A\", \"B\"]:\n role = f'IE-{l}{g}'\n if name.endswith(role):\n return role\n for l in [1,2,3,4]:\n for g in [2*l-1, 2*l]:\n role = f'MA{l}-{g}'\n if name.endswith(role):\n return role\n return \"TODO\"", "def get_role(self, role_name):\n role_record = self.list_roles(('name', role_name))\n if len(role_record) < 1:\n raise Exception('Role \\'%s\\' does not exist.' 
% role_name)\n return role_record[0]", "def __find_role (label):\n from data import role as mod\n roles = mod.load ( )\n \n for role in roles.get_all ( ):\n if label == role.label:\n return role\n else:\n raise Exception ('Aplikacija ne pozna vloge: ' + label)", "def get_roles(role):", "def find_role(self, name):\n return self.get_session.query(self.role_model).filter_by(name=name).one_or_none()", "def _process_family_role(head_text: str, full_text: str, person_type: str, alet_dict: dict) -> tuple:\n if 'agents' not in alet_dict: # Nothing to check\n return tuple()\n role_matches = []\n if head_text in family_members.keys(): # Looking for singular family role\n for alet in alet_dict['agents']:\n alet_names, alet_type, alet_iri = alet\n if f'_{head_text}' in alet_iri:\n role_matches.append((full_text, person_type, [':Person'], alet_iri))\n if len(role_matches) == 1:\n return role_matches[0]\n # TODO: Handle family role plurals\n # No match or multiple matches found\n return tuple()", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "def get_role(self, role_name):\n try:\n response = self._client.get_role(RoleName=role_name)\n except Exception as e:\n return False\n\n return response", "def role_arn_lookup(session, role_name):\n if session is None:\n return None\n\n client = session.client('iam')\n response = client.get_role(RoleName=role_name)\n if response is None:\n return None\n else:\n return response['Role']['Arn']", "def find_role(self, *args, **kwargs):\n raise NotImplementedError", "def role(self) -> str:\n return pulumi.get(self, \"role\")", "def get_role(self, name):\n role = Role.query.filter_by(name=name).first()\n\n return role", "def _get_role(self):\n return self.__role", "def get_role(self):\n return self.role", "def getRoleInfo(self, role):", "async def role_from_id(self, guild: discord.Guild, role_id: int):\n\n return discord.utils.get(guild.roles, id=role_id)", "def i_am(user_role):\n return user_role", "def from_string(cls, role: str) -> \"ProjectRole\":\n role = role.lower()\n for r in cls:\n if role == r.name.lower():\n return r\n raise ValueError('No project role matching \"{}\"'.format(role))", "def get_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover", "def read_role(self, name, mount_point=DEFAULT_MOUNT_POINT):\n api_path = '/v1/{mount_point}/role/{name}'.format(\n mount_point=mount_point,\n name=name,\n )\n return self._adapter.get(\n url=api_path,\n )" ]
[ "0.7939911", "0.6218153", "0.6166001", "0.61075884", "0.61075884", "0.61075884", "0.60433793", "0.6028485", "0.602459", "0.60132086", "0.5959827", "0.5903753", "0.58772725", "0.5834123", "0.58187157", "0.5792916", "0.5744908", "0.5739665", "0.5716778", "0.5713221", "0.5666325", "0.56572384", "0.56339204", "0.56167704", "0.5595827", "0.55712664", "0.55609024", "0.55573404", "0.5545849", "0.5538329" ]
0.8352632
0
Tries to parse a role out from the given text.
def parse_role(text, guild = None): parsed = ID_RP.fullmatch(text) if (parsed is not None): role_id = int(parsed.group(1)) try: role = ROLES[role_id] except KeyError: pass else: return role role = parse_role_mention(text) if (role is not None): return role if (guild is not None): if (guild is not None): role = guild.get_role_like(text) if (role is not None): return role return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_role_mention(text):\n parsed = ROLE_MENTION_RP.fullmatch(text)\n if parsed is None:\n return\n \n role_id = int(parsed.group(1))\n return ROLES.get(role_id, None)", "def from_string(cls, role: str) -> \"ProjectRole\":\n role = role.lower()\n for r in cls:\n if role == r.name.lower():\n return r\n raise ValueError('No project role matching \"{}\"'.format(role))", "def extract_role(name):\n name = sanitize_name(name)\n if ' - prof' in name:\n return \"prof\"\n for l in [1,2,3,4,5,6,7,8,9]:\n for g in [\"A\", \"B\"]:\n role = f'IE-{l}{g}'\n if name.endswith(role):\n return role\n for l in [1,2,3,4]:\n for g in [2*l-1, 2*l]:\n role = f'MA{l}-{g}'\n if name.endswith(role):\n return role\n return \"TODO\"", "def _process_family_role(head_text: str, full_text: str, person_type: str, alet_dict: dict) -> tuple:\n if 'agents' not in alet_dict: # Nothing to check\n return tuple()\n role_matches = []\n if head_text in family_members.keys(): # Looking for singular family role\n for alet in alet_dict['agents']:\n alet_names, alet_type, alet_iri = alet\n if f'_{head_text}' in alet_iri:\n role_matches.append((full_text, person_type, [':Person'], alet_iri))\n if len(role_matches) == 1:\n return role_matches[0]\n # TODO: Handle family role plurals\n # No match or multiple matches found\n return tuple()", "def token_role(self, role):\n return self.read('auth/token/roles/{0}'.format(role))", "def get_roles(role):", "def _parse_edge(self, tokens: TokenIterator):\n role_token = tokens.expect('ROLE')\n role = role_token.text\n if tokens.peek().type == 'ALIGNMENT':\n role += tokens.next().text\n\n target = None\n _next = tokens.peek()\n next_type = _next.type\n if next_type in ('SYMBOL', 'STRING'):\n target = tokens.next().text\n if tokens.peek().type == 'ALIGNMENT':\n target += tokens.next().text\n elif next_type == 'LPAREN':\n target = self._parse_node(tokens)\n # for robustness in parsing, allow edges with no target:\n # (x :ROLE :ROLE2... <- followed by another role\n # (x :ROLE ) <- end of node\n elif next_type not in ('ROLE', 'RPAREN'):\n raise tokens.error('Expected: SYMBOL, STRING, LPAREN', token=_next)\n else:\n logger.warning('Missing target: %s', role_token.line)\n\n return (role, target)", "def test_read_namespaced_role(self):\n pass", "def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 
32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()", "def get_role(guild: discord.Guild, role_id: int = None, role_name: str = None) -> Optional[discord.Role]:\n if guild is None:\n raise ValueError(\"guild is None\")\n if role_id is None and role_name is None:\n raise ValueError(\"Either role_id or role_name must be specified\")\n for role in guild.roles:\n if role.id == role_id or (role_name is not None and role.name.lower() == role_name.lower()):\n return role\n return None", "def getRole(self, desired=None):\n strDes = str(desired)\n logging.debug(\"[LaymanAuthLiferay][getRole]: '%s'\"%strDes)\n if not self.authorised:\n logging.error(\"[LaymanAuthLiferay][getRole] The user is not authorised\")\n raise AuthError(401, \"I am sorry, but you are not authorised\")\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"roles\"]:\n roles = self.authJson[\"userInfo\"][\"roles\"]\n if len(roles) < 1:\n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay provided empty list of roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay provided empty list of roles\") \n\n theRole = roles[0]\n for r in roles:\n if desired == r[\"roleName\"]:\n theRole = r\n\n #lower and spaces\n #theRole[\"roleName\"] = theRole[\"roleName\"].lower()\n #theRole[\"roleName\"] = \"_\".join(theRole[\"roleName\"].split(' '))\n roleName = theRole[\"roleName\"]\n logging.debug(\"[LaymanAuthLiferay][getRole] The role: '%s'\"% roleName)\n return theRole\n else: \n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay did not provide user's roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay did not provide user's roles\")", "def role(self):\n try:\n self._role = c_char(self.lib.iperf_get_test_role(self._test)).value.decode('utf-8')\n except TypeError:\n self._role = c_char(chr(self.lib.iperf_get_test_role(self._test))).value.decode('utf-8')\n return self._role", "def get_semantic_roles(self): \n spanishTokenizer = nltk.data.load(\"tokenizers/punkt/spanish.pickle\")\n testData = spanishTokenizer.tokenize(self.inputText)\n for line in testData:\n document = self.googleLanguageClient.document_from_text(line, language='es', encoding=language.Encoding.UTF8)\n tokens = self.correct_token_begin_position(self.analyze_syntax(document), line)\n for triple in self.find_triples(tokens):\n self.show_triple(tokens, line, triple) \n \n self.semanticRoleList = self.extract_semantic_roles()\n return self.semanticRoleList", "def read(cls, text):\n\n\t\treturn cls._parse(cls._tokenize(text))", "def apiref_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n name, namespace = tuple(text.split(\" | \"))\n app = inliner.document.settings.env.app\n uri = api_link(namespace)\n node = nodes.reference(name, name, refuri=uri)\n return [node], []", "def __find_role (label):\n from data import role as mod\n roles = mod.load ( )\n \n for role in roles.get_all ( ):\n if label == role.label:\n return role\n else:\n raise Exception ('Aplikacija ne pozna vloge: ' + label)", "def get_role(ssm):\n nodes = ssm[\"nodes\"]\n for node in nodes:\n if node[\"type\"] == \"role\":\n 
return node[\"name\"]\n return \"no role\"", "def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]", "def role_name_from(s: str) -> str:\n return s.lower().replace(\" \", \"-\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def get_role_id(name: str) -> str:\n response = api.get_roles()\n\n if not response.ok:\n print(response.data)\n sys.exit(1)\n\n for role in response.data.get(\"items\"):\n if role.get(\"name\") == ROLE:\n return role.get(\"id\")\n else:\n return None", "def frole(wrapper: MessageDispatcher, message: str):\n pl = get_players(wrapper.game_state)\n\n parts = message.lower().split(\",\")\n for part in parts:\n try:\n (name, role) = part.split(\":\", 1)\n except ValueError:\n wrapper.send(messages[\"frole_incorrect\"].format(part))\n return\n umatch = users.complete_match(name.strip(), pl)\n rmatch = match_role(role.strip(), allow_special=False)\n role = None\n if rmatch:\n role = rmatch.get().key\n if not umatch or not rmatch:\n wrapper.send(messages[\"frole_incorrect\"].format(part))\n return\n FORCE_ROLES[role].add(umatch.get())\n\n wrapper.send(messages[\"operation_successful\"])", "def tests_names_roles(verbose=True):\n # good names\n names = []\n with open(\"tests/good_names.txt\") as f:\n for line in f.readlines():\n line = line.replace('\\n', '')\n if line and not line.startswith(\"#\"):\n names.append(line)\n for name in names:\n if verbose:\n print(f\"'{name}' --> '{sanitize_name(name)}' of group '{extract_role(name)}'\")\n # assert check_name(name)\n role = extract_role(name)\n assert check_role(role) and role != \"TODO\"\n\n # bad names\n names = []\n with open(\"tests/bad_names.txt\") as f:\n for line in f.readlines():\n line = line.replace('\\n', '')\n if line and not line.startswith(\"#\"):\n names.append(line)\n for name in names:\n if verbose:\n print(f\"'{name}' --> '{sanitize_name(name)}' of group '{extract_role(name)}'\")\n # assert check_name(name)\n role = extract_role(name)\n assert (not check_name(name)) or (role == \"TODO\")", "async def pingrole(self, ctx, role: discord.Role, *, text):\n if role.mentionable:\n await ctx.send(inline('Error: role is already mentionable'))\n return\n\n try:\n await role.edit(mentionable=True)\n except Exception as ex:\n await ctx.send(inline('Error: failed to set role mentionable'))\n if ex.text == \"Missing Permissions\":\n message = await ctx.send(inline('Make sure this bot\\'s role is higher than the one you\\'re mentioning'))\n await asyncio.sleep(3)\n await message.delete()\n return\n\n await ctx.message.delete()\n await asyncio.sleep(1)\n await ctx.send('From {}:\\n{}\\n{}'.format(ctx.author.mention, role.mention, text))\n\n try:\n await role.edit(mentionable=False)\n except Exception as ex:\n await ctx.send(inline('Error: failed to set role unmentionable'))\n return", "async def role_from_id(self, guild: discord.Guild, role_id: int):\n\n return discord.utils.get(guild.roles, id=role_id)", "def parse(self, text):\n node = self.match(text)\n if node is None or node.end - node.start != len(text): # TODO: Why not test just end here? 
Are we going to add a pos kwarg or something?\n # If it was not a complete parse, return None:\n return None\n return node", "def find_role(self, *args, **kwargs):\n raise NotImplementedError", "def get_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover" ]
[ "0.77921796", "0.5978904", "0.58639723", "0.55597717", "0.54797274", "0.533363", "0.52843875", "0.52097017", "0.52029", "0.5198293", "0.5160467", "0.5152056", "0.51393384", "0.5109315", "0.51079017", "0.51058346", "0.50979936", "0.50877523", "0.508482", "0.50777274", "0.50777274", "0.50777274", "0.50307727", "0.5027737", "0.5014882", "0.5000935", "0.49869236", "0.49812025", "0.49714798", "0.496997" ]
0.8224785
0
Fixture to get minigraph facts
def minigraph_facts(duthosts, rand_one_dut_hostname, tbinfo): duthost = duthosts[rand_one_dut_hostname] return duthost.get_extended_minigraph_facts(tbinfo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basic_setup(self):\n random_vars = ['D', 'I', 'G', 'S', 'L']\n\n for rv in random_vars:\n self.assertTrue(rv in self.Gs.nodes)\n self.assertTrue(isinstance(self.Gs.nodes[rv], DiscreteNetworkNode))", "def test_get_hyperflex_node_by_moid(self):\n pass", "def test_setup(self):\n self.setup()\n print(\"Nodes in graph\")\n for node in self.graph.graph.nodes:\n print(node)\n print(\"Edges in graph\")\n for edge in self.graph.graph.edges(data=True):\n print(edge)", "def test_get_hyperflex_cluster_by_moid(self):\n pass", "def test_get_related_nodes(self):\n pass", "def test_get_grid_edge_nodes(flopy_dis_mf6):\n mf6 = flopy_dis_mf6[1]\n mf6.initialize()\n\n with pytest.raises(NotImplementedError):\n mf6.get_grid_edge_nodes(1, np.zeros((1, 1)))", "def test_get_hyperflex_node_profile_by_moid(self):\n pass", "def known_mines(self):\n self.mines", "def test_get_nodes(self):\n wp22_rdf_graph = parse_rdf(WP22)\n wp706_rdf_graph = parse_rdf(WP706)\n wp1871_rdf_graph = parse_rdf(WP1871)\n wp2799_rdf_graph = parse_rdf(WP2799)\n\n nodes_wp22 = _get_nodes(wp22_rdf_graph)\n nodes_wp706 = _get_nodes(wp706_rdf_graph)\n nodes_wp1871 = _get_nodes(wp1871_rdf_graph)\n nodes_wp2799 = _get_nodes(wp2799_rdf_graph)\n\n self.assertEqual(len(nodes_wp22), 17)\n self.assertEqual(len(nodes_wp706), 186)\n self.assertEqual(len(nodes_wp1871), 115)\n self.assertEqual(len(nodes_wp2799), 141)", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def get_minimal_unknown_set(master_soln_json):\n eqbank = { obj[\"id\"]: obj for obj in json.load(open(\"./tools/equation_bank.json\"))}\n \n # print(\"Master JSON has been loaded.\")\n global master_unknown_summary \n global master_dep_graph\n \n master_unknown_summary = get_unknown_summary(master_soln_json)\n master_dep_graph = makeDependencyGraph(master_soln_json, eqbank)\n master_dg_folded = dependencyFolding(master_dep_graph, debug=False)\n\n # Create the list of unknowns and symbols from the folded graph\n # For each of the solution_ids, if the graph contains >1 unknown,\n # add that to the dictionary of unknowns and symbols. These are the ones\n # that need to be explicitly identified by the user in the solution.\n # Doesn't matter if there are duplicates, i.e. 
same unknowns appearing\n # in multiple solutions, ultimately they're all in the same system anyway,\n # so they still need to be identified/var mapped.\n\n minimal_set = {}\n unknown_summary = get_unknown_summary(master_soln_json)\n\n for _, g_sub in master_dg_folded.items():\n # filter out the nodes that are the unknowns and count them\n l_gsub_unknowns = [\n n for n in g_sub if g_sub.nodes[n]['group'] == 'unknown' \n and nx.degree(g_sub, n) > 0\n ]\n # print(_,l_gsub_unknowns)\n if len(l_gsub_unknowns) == 1:\n # only onw unknown, no var map needed.\n pass\n else:\n # we need a varmap, store this reference.\n minimal_set.update({k:\"\" for k in l_gsub_unknowns})\n \n for k in minimal_set:\n value = unknown_summary[next(iter(unknown_summary[k]))]\n if isinstance(value['value'], dict):\n # print(k,value['value']['varDisplay'])\n minimal_set[k] = value['value']['varDisplay']\n else:\n # print(k,value['symbol_context']['parentSymbol'])\n minimal_set[k] = value['symbol_context']['parentSymbol']\n\n return minimal_set if len(minimal_set) > 0 else None", "def fixtures():", "def test_nxgraph(self):\n self._build_sample_graph()\n # Adding singleton\n sg = self.skill_graph.add(Skill.build('g', ''))\n skill_map = SkillMap.load(self.course)\n nxgraph = SkillMapMetrics(skill_map).nxgraph\n self.assertIsInstance(nxgraph, DiGraph)\n successors = skill_map.build_successors()\n # Check nodes\n self.assertEqual(len(nxgraph), len(successors))\n for skill in successors:\n self.assertIn(skill, nxgraph.nodes(),\n msg='Node {} not found in nx graph.'.format(skill))\n # Check edges\n original_edges = sum(len(dst) for dst in successors.values())\n self.assertEqual(len(nxgraph.edges()), original_edges)\n for src, dst in nxgraph.edges_iter():\n self.assertIn(src, successors)\n self.assertIn(dst, successors[src],\n msg='Extra {},{} edge in nx graph.'.format(src, dst))", "def test_get_hyperflex_cluster_profile_by_moid(self):\n pass", "def test_minicity(self):\n # import the experiment variable from the example\n exp = minicity_example(render=False)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)", "def test_graph1():\n mol_graph = DGLGraph([(0, 1), (0, 2), (1, 2)])\n node_feats = torch.arange(mol_graph.number_of_nodes()).float().reshape(-1, 1)\n edge_feats = torch.arange(2 * mol_graph.number_of_edges()).float().reshape(-1, 2)\n\n complete_graph = get_complete_graph(mol_graph.number_of_nodes())\n atom_pair_feats = torch.arange(complete_graph.number_of_edges()).float().reshape(-1, 1)\n\n return mol_graph, node_feats, edge_feats, complete_graph, atom_pair_feats", "def inference(self):\n logging.info('RandomizeMincut inference')\n\n perturbation_num = 4\n noise_rate = 0.3\n noise_range = (-0.6, 0.4)\n base_adj_matrix = copy.deepcopy(self._graph.get_adjacency('weight').data)\n base_adj_matrix = np.array(base_adj_matrix)[:-2, :-2] # remove pseudo vertices\n vertices_number = self._graph.vcount()\n y = [] # list of return results\n\n # basic mincut first\n self._cut = self._graph.st_mincut(source=self._v_plus, target=self._v_minus, capacity='weight')\n [positive_label, _] = self._cut.partition\n\n # ignore this cut if it has unbalance separation\n if len(positive_label) > 0.06 * vertices_number or \\\n len(positive_label) < 0.94 * vertices_number:\n labels = np.zeros(vertices_number) # re-construct y\n labels[positive_label] = 1\n y.append(labels[:-1]) # omit v_plus\n\n for _ in range(perturbation_num):\n # init noise\n noise_decision = np.random.binomial(\n n=1,\n p=noise_rate,\n 
size=base_adj_matrix.shape\n )\n noise_value = np.random.uniform(\n low=noise_range[0],\n high=noise_range[1],\n size=base_adj_matrix.shape\n )\n noise_value = noise_decision * noise_value\n\n # add noise\n noise_weight = np.triu(base_adj_matrix + noise_value) # only get upper triangle\n self._graph.es['weight'] = noise_weight[noise_weight.nonzero()]\n\n # find mincut\n self._cut = self._graph.st_mincut(source=self._v_plus, target=self._v_minus, capacity='weight')\n [positive_label, _] = self._cut.partition\n\n # ignore this cut if it has unbalance separation\n if len(positive_label) < 0.06*vertices_number or \\\n len(positive_label) > 0.94*vertices_number:\n continue\n\n labels = np.zeros(vertices_number) # re-construct y\n labels[positive_label] = 1\n y.append(labels[:-1]) # omit v_plus\n\n self._graph.delete_vertices([self._v_minus, self._v_plus])\n vote = np.array(y).sum(axis=0) # if sum > cases//2: label 1, else label 0\n self._graph.vs['label'] = (vote > len(y)//2).astype(int)", "def test_get_hyperflex_node_list(self):\n pass", "def test_expected_integration_sample():\n\n path_to_metadata = Path(__file__).parent.joinpath(\"metadata-northwind-v2.xml\")\n metadata_file_contents = path_to_metadata.read_bytes()\n # do not pass metadata as python string but read as bytes, usually ends with Unicode vs xml encoding mismatch.\n\n restrictions = RestrictionsGroup(None)\n builder = DirectBuilder(metadata_file_contents, restrictions,\"GET\")\n entities = builder.build()\n\n ''' uncomment for code sample purposes\n print('\\n entity count: ', len(entities.all()) )\n for x in entities.all():\n print(x.__class__.__name__, ' -- ', x.entity_set)\n print('\\n--End of listing the parsed entities--')\n '''\n\n queryable_factory = SingleQueryable\n\n for queryable in entities.all():\n URL_COUNT_PER_ENTITYSET = len(queryable.entity_set.entity_type.proprties()) * 1\n #Leaving as 1 instead of default 20, so the test output is more understandable and each property has one URL generated\n\n ''' uncomment for code sample purposes\n print('Population range for entity \\'{}\\' - {} - is set to {}'.format(queryable.entity_set.name, queryable.__class__, URL_COUNT_PER_ENTITYSET))\n '''\n\n for _ in range(URL_COUNT_PER_ENTITYSET):\n q = queryable_factory(queryable, logger, 1)\n queries = q.generate()\n ''' uncomment for code sample purposes \n print(queries[0].query_string) \n #hardcoded 0, since SingleQueryable is used and therefore generate only one URL\n '''\n assert queries[0].query_string != \"\"", "def test_getitem():\n world, bodies, _ = example_world()\n\n geometry = metis.geometry.ManyShapeGeometry(world, bodies)\n dynamics = metis.dynamics.MagneticDynamics(bodies)\n factored_graph = FactoredRandomGeometricGraph(\n geometry, dynamics, default_count=5,\n blacklist=NoObjectContactBlacklist())\n\n vertices = [\n {'robot': (None, 0), 'box1': (None, 0), 'box2': (None, 0)},\n {'robot': ('box1', 0), 'box1': (None, 0), 'box2': (None, 0)},]\n\n for vertex in vertices:\n configuration = factored_graph[vertex]\n assert all(name in configuration for name in factored_graph.names)\n assert all(len(pose) == 3 for pose in configuration.itervalues())", "def test_mining_train():\n g = clondike_transshipment_problem()\n assert isinstance(g, Graph)\n\n equipment_deliveries = [\n (\"L-1\", \"L-1-1\"),\n (\"L-1\", \"L-1-2\"), # origin, destination\n (\"L-1\", \"L-1-3\"),\n (\"L-1\", \"L-1-4\")\n ]\n\n mineral_deliveries = [\n (\"L-1-1\", \"L-1\"),\n (\"L-1-2\", \"L-1\"),\n (\"L-1-3\", \"L-1\"),\n (\"L-1-4\", \"L-1\"),\n ]\n\n 
access_nodes = {\"L-1\", \"L-1-1\", \"L-1-2\", \"L-1-3\", \"L-1-4\"}\n\n train = Train(rail_network=g, start_location=\"L-1\", access=access_nodes)\n\n s1 = train.schedule(equipment_deliveries)\n s2 = train.schedule(mineral_deliveries)\n s3 = train.schedule(equipment_deliveries[:] + mineral_deliveries[:])\n\n s1_expected = [\n ('L-1', 'L-1-1'), ('L-1', 'L-1-2'), ('L-1', 'L-1-3'), ('L-1', 'L-1-4')\n ] # shortest jobs first.!\n\n s2_expected = [\n ('L-1-1', 'L-1'), ('L-1-2', 'L-1'), ('L-1-3', 'L-1'), ('L-1-4', 'L-1')\n ] # shortest job first!\n\n s3_expected = [\n ('L-1', 'L-1-1'), ('L-1-1', 'L-1'), # circuit 1\n ('L-1', 'L-1-2'), ('L-1-2', 'L-1'), # circuit 2\n ('L-1', 'L-1-3'), ('L-1-3', 'L-1'), # circuit 3\n ('L-1', 'L-1-4'), ('L-1-4', 'L-1') # circuit 4\n ] # shortest circuit first.\n\n assert s1 == s1_expected\n assert s2 == s2_expected\n assert s3 == s3_expected", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def test_metrostations_get(self):\n pass", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_tracker_graph():\n\n objects = _load_csv()\n ground_truth_graph = _load_ground_truth_graph()\n\n # run the tracking\n tracker = full_tracker_example(objects)\n _, _, graph = tracker.to_napari(ndim=2)\n\n assert ground_truth_graph == graph", "def test_trainGenerator():\n\n # check type\n assert isinstance(trainset, surprise.trainset.Trainset)\n\n # the number of users in trainset should be equal to the user from database plus 1\n assert len(trainset.all_users()) == len(svd.song_df.user_id.unique())+1", "def test_getCpfRelations(self):\n pass", "def _randomize(self):\n return self.graph", "def get_nodes():\n return conf.config.get_nodes(RELATIVE_PATH_FIXTURES_HOST)", "def generate(self):\r\n # prepare data\r\n banknote_quantity_max = [int(math.floor(self.money / self.banknotes[i])) for i in range(0, self.n)]\r\n # model\r\n mdl = Model(name='MinSetGenerator')\r\n # decision variables\r\n mdl.banknote_quantity = {i: mdl.integer_var(lb=0, ub=banknote_quantity_max[i]) for i in range(0, self.n)}\r\n # decision expressions\r\n money_amount = mdl.sum(mdl.banknote_quantity[i] * self.banknotes[i] for i in range(0, self.n))\r\n notes_quantity = mdl.sum(mdl.banknote_quantity[i] for i in range(0, self.n))\r\n # constraints\r\n mdl.add_constraint(money_amount == self.money)\r\n # strategy\r\n mdl.minimize(notes_quantity)\r\n # solve model: return quantity of each banknotes and a set with a minimal number of banknotes\r\n if not mdl.solve():\r\n print('*** No solution!')\r\n return None, None\r\n else:\r\n return [int(mdl.banknote_quantity[i].solution_value) for i in range(0, self.n)], \\\r\n [self.banknotes[i] for i in range(0, self.n) if mdl.banknote_quantity[i].solution_value > 0]" ]
[ "0.5542392", "0.53636146", "0.53595406", "0.5312273", "0.5284532", "0.521184", "0.5182062", "0.51748407", "0.5132129", "0.5122965", "0.5092739", "0.50812584", "0.50746775", "0.50527835", "0.5052672", "0.5042752", "0.50389737", "0.50388426", "0.5033967", "0.5026703", "0.502106", "0.5018668", "0.5000699", "0.49803", "0.4949654", "0.4937079", "0.49362832", "0.49334133", "0.49155778", "0.49014255" ]
0.6679881
0
Parse a blog post comment using the `commentmarkup.parse` function.
def parse_comment(value):\n    try:\n        return mark_safe(commentmarkup.parse(value))\n    except ValueError:\n        return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_comment(comment, postid):\n urls = get_links_from_body(comment.body)\n if urls:\n # Only insert comment into DB if it contains a link\n comid_db = db.insert('Comments',\n (None,\n postid,\n comment.id,\n comment.author,\n comment.body,\n comment.upvotes,\n comment.downvotes,\n comment.created_utc))\n for url in urls:\n parse_url(url, postid=postid, commentid=comid_db)\n # Recurse over child comments\n for child in comment.children:\n parse_comment(child, postid)", "def each_comment_from_post(post):\n # first yield the post text body, if any\n if post['text']:\n yield post['text']\n # then yield each comment\n for comment in post['comments']:\n yield comment['text']", "def parse_comment(self, comment):\n created_utc = datetime.utcfromtimestamp(comment.created_utc).isoformat()\n\n if comment.author is not None:\n author = comment.author.name\n else:\n author = \"\"\n\n com_obj = CommentMessage(\n comment.id,\n comment.link_id,\n self.subreddit,\n author,\n created_utc,\n comment.body,\n comment.score,\n )\n\n return com_obj", "def parse_comment(self, node):\n\n data = []\n\n if node is not None:\n comment_id_pattern = re.compile('comment-(\\d+)')\n for comment_node in node.find_all('div', class_='comment'):\n item = {}\n item['is_deletable'] = False\n item['is_editable'] = False\n \n comment_id_result = comment_id_pattern.search(comment_node.get('id'))\n if comment_id_result:\n item['id'] = int(comment_id_result.group(1))\n \n comment_body_node = comment_node.find('div', class_='comment-body')\n if comment_body_node is not None:\n item['content'] = ''\n for p in comment_body_node.find_all(recursive=False):\n if 'class' in p.attrs and 'author' in p['class']:\n item['author'] = p.get_text()\n item['profile_url'] = self.get_link(p.get('href'))\n author_id = self._parse_user_id_from_url(item['profile_url'])\n if self.userId == author_id:\n item['is_deletable'] = True\n item['is_editable'] = True\n elif 'class' in p.attrs and 'age' in p['class']:\n item['date'] = p.abbr['title']\n item['date_ago'] = timeago.format(self._parse_datetime(item['date']), datetime.now(TIMEZONE))\n elif 'class' in p.attrs and 'edit' in p['class']:\n continue\n elif p.name == 'form':\n continue\n else:\n item['content'] += str(p)\n\n data.append(item)\n\n return data", "def parse_post(post):\n # Ignore posts less than 24 hours old\n if time.time() - post.created < 60 * 60 * 24:\n logger.debug('Ignoring post (too new)')\n return False\n\n # Add post to database\n postid_db = db.insert('Posts',\n (None,\n post.id,\n post.title,\n post.url,\n post.selftext,\n post.author,\n post.permalink,\n post.subreddit,\n post.num_comments,\n post.upvotes,\n post.downvotes,\n post.score,\n post.created_utc,\n int(post.is_self),\n int(post.over_18)))\n # If post already exists, we've already indexed it; skip!\n if postid_db == -1:\n logger.debug('Ignoring post (already indexed)')\n return False\n # Write post to DB so we don't hit it again\n\n # NOTE: postid_db is the ID of the post in the database; NOT on reddit\n\n # Check for self-post\n if post.selftext != '':\n urls = get_links_from_body(post.selftext)\n for url in urls:\n parse_url(url, postid=postid_db)\n else:\n # Attempt to retrieve hash(es) from link\n parse_url(post.url, postid=postid_db)\n\n # Iterate over top-level comments\n if post.num_comments > 0:\n reddit.fetch_comments(post)\n for comment in post.comments:\n parse_comment(comment, postid_db)", "def parseComment(self):\n libxml2mod.xmlParseComment(self._o)", "def process_comment(request, comment, post):\n\n 
if request.user.is_authenticated:\n # We already set auth user's name and email in the form's inital vals.\n comment.author = request.user\n\n # Is this a threaded comment?\n if request.POST.get(\"parent_id\"):\n comment.parent = Comment.objects.get(id=request.POST.get(\"parent_id\"))\n\n # If commenter is logged in, override name and email with stored values from User object\n if request.user.is_authenticated:\n comment.name = request.user.get_full_name()\n comment.email = request.user.email\n\n # Set required relationship to Post object\n comment.post = post\n\n # Get commenter's IP and User-Agent string\n # ip = get_ip(request)\n # if ip is not None:\n # comment.ip_address = ip\n comment.user_agent = request.META.get(\"HTTP_USER_AGENT\", \"\")\n\n # Run spam check\n comment.spam = spam_check(comment)\n\n # Strip disallowed HTML tags. See tangerine docs to customize.\n comment.body = sanitize_comment(comment.body)\n\n # Call comment approval workflow\n comment.approved = get_comment_approval(comment.email, request.user.is_authenticated)\n if comment.approved:\n messages.add_message(request, messages.SUCCESS, \"Your comment has been posted.\")\n else:\n messages.add_message(request, messages.INFO, \"Your comment has been held for moderation.\")\n\n comment.save()\n\n # Alert post author that comment needs moderation, or that it's been auto-published:\n send_comment_moderation_email(comment)", "def comment_to_object(self, comment, post_author_id=None):\n # the message_tags field is different in comment vs post. in post, it's a\n # dict of lists, in comment it's just a list. so, convert it to post style\n # here before running post_to_object().\n comment = dict(comment)\n comment['message_tags'] = {'1': comment.get('message_tags', [])}\n\n obj = self.post_to_object(comment)\n if not obj:\n return obj\n\n obj['objectType'] = 'comment'\n\n match = self.COMMENT_ID_RE.match(comment.get('id', ''))\n if match:\n post_author, post_id, comment_id = match.groups()\n obj['url'] = self.comment_url(post_id, comment_id,\n post_author_id=post_author_id)\n obj['inReplyTo'] = [{'id': self.tag_uri(post_id)}]\n\n return self.postprocess_object(obj)", "def auto_transform_markup(comment):\r\n try:\r\n from django.utils.html import escape\r\n from threadedcomments.models import MARKDOWN, TEXTILE, REST, PLAINTEXT\r\n if comment.markup == MARKDOWN:\r\n from django.contrib.markup.templatetags.markup import markdown\r\n return markdown(comment.comment)\r\n elif comment.markup == TEXTILE:\r\n from django.contrib.markup.templatetags.markup import textile\r\n return textile(comment.comment)\r\n elif comment.markup == REST:\r\n from django.contrib.markup.templatetags.markup import restructuredtext\r\n return restructuredtext(comment.comment)\r\n# elif comment.markup == HTML:\r\n# return mark_safe(force_unicode(comment.comment))\r\n elif comment.markup == PLAINTEXT:\r\n return escape(comment.comment)\r\n except ImportError:\r\n # Not marking safe, in case tag fails and users input malicious code.\r\n return force_unicode(comment.comment)", "def parse_article(self, response):\n\n raw_post = response.css(\"div.blog.post > div.inner > div.row > article\")\n\n post_loader = ItemLoader(item=BlogPostItem(), selector=raw_post)\n post_loader.default_output_processor = TakeFirst()\n\n post_title = raw_post.css(\"div#postcontent > h1::text\").extract_first()\n post_loader.add_value(\"title\", post_title)\n post_loader.add_value(\"url\", response.request.url)\n\n post_text_selector = raw_post.css(\"div#postcontent > 
div#mypost\")\n post_text = post_text_selector.xpath('string(.)').extract_first()\n post_loader.add_value(\"content\", post_text[:160])\n\n pub_date_text = raw_post.css(\"div#postcontent > div.no-mobile > div.posttag.right.nomobile > span::text\").extract_first()\n pub_date = parse_date(pub_date_text)\n post_loader.add_value(\"publication_date\", pub_date)\n\n initial_author_list = raw_post.css(\n \"div#postcontent > div.no-mobile > div.postauthor > span > a.goauthor > span::text\").extract()\n author_list = [name.strip() for name in initial_author_list]\n post_authors = \"::\".join(author_list)\n post_loader.add_value(\"author\", post_authors)\n\n post_tags = raw_post.css(\"div#postcontent > a.tag.secondary::attr(title)\").extract()\n post_tags_str = \"::\".join(post_tags)\n post_loader.add_value(\"tags\", post_tags_str)\n\n return post_loader.load_item()", "def read_comment(comment):\n comment_dict = {}\n\n debug(\"parse tab in comment.\")\n comment_dict_from_tab, comment = parse_tab_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_tab)\n comment_dict.update(comment_dict_from_tab)\n\n debug(\"parse space in comment.\")\n comment_dict_from_space, comment = parse_space_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_space)\n comment_dict.update(comment_dict_from_space)\n\n debug(\"parse keyword in comment.\")\n comment_dict_from_keyword, comment = parse_keyword_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_keyword)\n comment_dict.update(comment_dict_from_keyword)\n # keyword based separation.\n return comment_dict", "def parse_markdown(filename):\n if not os.path.exists(filename):\n error('File not found', filename)\n\n posts = list()\n with open(filename, encoding='utf-8') as f:\n line = next(f)\n if line.startswith('# '):\n title = line[2:].strip()\n record = []\n next(f)\n else:\n title = None\n record = [line]\n for line in f:\n if not line.startswith('___'):\n record.append(line)\n else:\n posts.append(Post.from_markdown(record))\n record = []\n\n # set rank of posts in date\n daterank = defaultdict(int)\n for post in posts:\n daterank[post.date] += 1\n post.daterank = daterank[post.date]\n\n # check post order\n for post1, post2 in zip(posts[:-1], posts[1:]):\n if post1.date > post2.date:\n error('Posts are not ordered', f'{post1.date} > {post2.date}')\n\n return title, posts", "def read_post(request, post_id, author_id, slug):\n if request.method == 'POST':\n form_comment = request.POST['comment']\n user = myUser.objects.get(id=author_id)\n post = Post.objects.get(id=post_id)\n comment = BlogComment(user=user, blogpost=post)\n comment.content = form_comment\n comment.save()\n\n post = Post.objects.get(slug=slug)\n comments = BlogComment.objects.filter(blogpost=post_id)\n like = Likes.objects.filter(post_id=post_id)\n return render(request, 'posts/read_post.html',\n {'comments': comments, 'post': post,'like': like})\n\n post = Post.objects.get(slug=slug)\n comments = BlogComment.objects.filter(blogpost=post_id)\n like = Likes.objects.filter(post_id=post_id)\n return render(request, 'posts/read_post.html',\n {'comments': comments, 'post': post, 'like': like })", "def parse_comments(submission):\n comments = []\n submission.replace_more_comments()\n for c in praw.helpers.flatten_tree(submission.comments):\n comment_dict = c.__dict__\n\n # NOTE: author is a special case (and must be present)\n author = c.author.name if hasattr(c.author, \"name\") else None\n if not author:\n continue\n\n comment = {\n \"submission_id\": 
submission.id,\n \"author\": author\n }\n del comment_dict[\"author\"] # no longer needed\n for k in _model_columns(Comment):\n if k in comment_dict:\n comment[k] = comment_dict[k]\n comments.append(comment)\n\n return comments", "def do_comment(self, data={}):\n\n try:\n comment = data['comment'] if 'comment' in data else ''\n post_type = data['post_type'] if 'post_type' in data else ''\n post_id = int(data['post_id']) if 'post_id' in data else ''\n\n if not comment or not post_type or not post_id:\n raise Exception('Invalid parameter')\n\n submit_comment_url = BASE_URL + 'post_comments/'\n response = self.request('POST', submit_comment_url, params={\n 'comment': comment, 'post_type': post_type, 'post_id': post_id\n })\n response = response.json()\n output = []\n for item in response:\n output.append(self._convert_comment(item))\n return output\n except Exception as e:\n Utils.log(traceback.format_exc())\n Utils.error(e.args[0])", "def parse_post_text(formatted_content):\n post = {}\n # Parse Mod comments and remove them from the text.\n potential_comments = re.finditer(\"\\[.+?\\]\", formatted_content, re.DOTALL)\n comments = []\n for comment_match in potential_comments:\n comment = comment_match.group()\n mod = re.search(r\"\\-\\s?Mod\\.\\s?(?P<mod>\\w+\\b)\", comment)\n if mod:\n comments.append({\n \"comment\" : comment,\n \"mod\" : mod.group(\"mod\")\n })\n post[\"modComments\"] = comments\n \n # Comments are removed from the post test so that\n # links, reports, etc. mentioned by mods are not extracted.\n no_comment_txt = formatted_content\n for comment in comments:\n no_comment_txt = no_comment_txt.replace(comment[\"comment\"], \"\")\n \n metadata, header_end = parse_post_metadata(no_comment_txt)\n post.update(metadata)\n \n sections = re.split(r\"^[\\*#]{3,}\\s*$\", no_comment_txt[header_end:], flags=re.M)\n articles = []\n \n # Some posts have articles which are parsed into multiple sections:\n # Ex: http://www.promedmail.org/direct.php?id=2194235\n # The section parsing code tries to recombine these by concatenating\n # unrecognized sections onto the previous sections if they form an article.\n # article_start_idx keeps track of the first section in the article.\n article_start_idx = None\n \n for idx, section in enumerate(sections):\n section = section.strip()\n article = parse_article_text(section, post_date=post['promedDate'])\n # Check if the section contains an actual article by seeing which\n # properties could be parsed.\n if article.get('source') or article.get('date'):\n articles.append(article)\n article_start_idx = idx\n else:\n # When a section cannot be parsed as an article the following code\n # tries to determine what it is. If the type cannot be determined\n # an error or warning is thrown.\n # These warnings can be used to find sections which are not being\n # correctly parsed.\n # Posts with known issues:\n # http://www.promedmail.org/direct.php?id=19990512.0773\n if re.search(r\"Visit ProMED-mail\\'s web site at|\"\n r\"Please support (the \\d{4}\\s)?ProMED\\-mail|\"\n r\"Donate to ProMED\\-mail. 
Details available at|\"\n r\"ProMED\\-mail makes every effort to verify the reports|\"\n r\"PROMED\\-MAIL FREQUENTLY ASKED QUESTIONS|\"\n r\"Become a ProMED\\-mail Premium Subscriber|\"\n r\"A ProMED\\-mail post\",\n section, re.I):\n # boilerplate promed notice section\n pass\n elif re.search(r\"In this (update|post(ing)?)\", section):\n # table of contents section\n pass\n elif re.search(r\"Cases in various countries\", section):\n # This type of post typically has links to several articles\n # with single sentence summaries.\n # Ex: http://www.promedmail.org/direct.php?id=20131125.2073661\n pass\n elif section == \"\":\n # empty section\n pass\n elif idx == 0 and section.count(\"\\n\") < 2:\n # probably the article title\n pass\n else:\n if article_start_idx != None:\n article = parse_article_text(\n \"\\n#####\\n\".join(\n sections[article_start_idx:idx]).strip(),\n post_date=post['promedDate'])\n assert article.get('source') or article.get('date')\n articles[-1] = article\n continue\n else:\n print \"Unexpected Section (%s):\" % post['archiveNumber'], [section[0:50] + \"...\"]\n article_start_idx = None\n post['articles'] = articles\n return post", "def getPostComment(self, address: ghidra.program.model.address.Address) -> unicode:\n ...", "def parseComments(data):\n global comments\n reviewBegins = '<div style=\"margin-left:0.5em;\">'\n reviewEnds = '<div style=\"padding-top: 10px; clear: both; width: 100%;\">'\n stars_line = 'margin-right:5px;'\n stars = re.compile('\\d+.\\d+ out of 5 stars')\n header_line = '<span style=\"vertical-align:middle;\"'\n helpful_line ='people found the following review helpful'\n helpful = re.compile('\\d+ of \\d+ people found the following review helpful')\n reviewText = '<span class=\"h3color tiny\">' # Actual review\n\n boundaries = commentsStartStopLineNmbr(data)\n for i in range(boundaries[0], boundaries[1] + 1):\n if reviewBegins in data[i]:\n curcomment = Comment()\n while reviewEnds not in data[i]:\n # Parse stars\n if stars_line in data[i]:\n stars_found = re.search(stars, data[i])\n if stars_found != None:\n curcomment.stars = stars_found.group()\n # Parse header\n elif header_line in data[i]:\n line = data[i]\n begin = line.find('<b>') + 3\n end = line.find('</b>')\n curcomment.header = line[begin : end]\n # Parse helpfulness\n elif helpful_line in data[i]:\n helpful_found = data[i].replace(\",\", \"\")\n helpful_found = re.search(helpful, helpful_found)\n if helpful_found != None:\n curcomment.helpful = helpful_found.group()\n # Parse body text\n elif reviewText in data[i]:\n i += 3\n if '<span class=\"small\"' in data[i]: # Yep, dirty trick :(\n i += 3\n data[i] = stripHtmlTags(data[i])\n curcomment.comment = re.sub(\"\\s+\", \" \", data[i])\n i += 1\n comments.append(curcomment.getonelinecomment())\n #comments.append(curcomment.__repr__())", "def post_comment(self, entry, body, **args):\n args.update(entry=entry, body=body)\n return self.fetch(\"/comment\", post_args=args)", "def parse_comments_html(advertise: Dict[str, Any]) -> Optional[List[str]]:\n if \"comments_html\" in advertise.keys():\n\n filtred_comments: str = advertise[\"comments_html\"][200::]\n\n tmp: List[str] = re.split(\"[ \\n\\t]{2,}\", filtred_comments)\n if '' in tmp:\n tmp.remove('')\n\n # Breaking comments\n master: List[List[str]] = []\n tmp_vec: List[str] = []\n for line in tmp:\n\n if re.search(\"de \\d{4,}\", line): # matches 'de 2018' that signals the end of comment\n master.append(tmp_vec)\n tmp_vec = []\n else:\n tmp_vec.append(line)\n\n # Cleaning comments\n 
for comment in master:\n if \"...\" in comment:\n comment.remove(\"...\")\n if \"O usuário contratou o serviço em\" in comment:\n comment.remove(\"O usuário contratou o serviço em\")\n\n return [\" \".join(m) for m in master]", "def new_comment(self, post_id, comment):\n # *don't* pass in username and password. if you do, that wordpress user's\n # name and url override the ones we provide in the xmlrpc call.\n #\n # also, use '' instead of None, even though we use allow_none=True. it\n # converts None to <nil />, which wordpress's xmlrpc server interprets as\n # \"no parameter\" instead of \"blank parameter.\"\n #\n # note that this requires anonymous commenting to be turned on in wordpress\n # via the xmlrpc_allow_anonymous_comments filter.\n return self.proxy.wp.newComment(self.blog_id, '', '', post_id, comment)", "def get_post_comments(post, user_agent=default_user_agent):\n post_permalink = post['permalink']\n\n response_data = requests.get(post_permalink, headers = {'User-agent': user_agent})\n post_data = response_data.json()[1]\n\n # right now this gets the title, eventually convert to unique id for each title\n post_id = post['post_id']\n\n return get_post_comments_recur(post_data, [], -1, post_id)", "def parse_comment(comment: Union[Token, PsuedoToken]) -> str:\n # Happens when there is no documentation comment in the source file for the\n # item.\n spelling = comment.spelling\n if spelling is None:\n return \"\"\n\n # Comments from clang start at the '/*' portion, but if the comment itself\n # is indented subsequent lines will have too much indent.\n # Transform::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n #\n # into::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n indent = \" \" * (comment.extent.start.column - 1)\n indented_comment = indent + spelling\n dedented_comment = textwrap.dedent(indented_comment)\n\n # Notes on the regex here.\n # Option 1 '\\s?\\*/?'\n # This piece will match comment lines that start with '*' or ' *'.\n # This will also match a trailing '*/' for the end of a comment\n #\n # Option 2 '^/\\*+<?'\n # This will match the start of a comment '/*' and consume any\n # subsequent '*'. 
This is also meant to catch '/**<' for trailing comments.\n #\n # Option 3 '\\*+/'\n # Matches any and all '*' up to the end of the comment string.\n contents = re.sub(\n r\"^\\s?\\*/?|^/\\*+<?|\\*+/\",\n lambda x: len(x.group(0)) * \" \",\n dedented_comment,\n flags=re.MULTILINE,\n )\n\n contents = textwrap.dedent(contents)\n\n # there may still be left over newlines so only strip those, but leave any\n # whitespaces.\n contents = contents.strip(\"\\n\")\n\n return contents", "def comment_content(c):\n content = str(c)[4:-3]\n return content.strip()", "def post_process_post(self, post):\r\n post.article = self.rewrite_ob_urls(post.article)\r\n post._commit()\r\n \r\n comments = Comment._query(Comment.c.link_id == post._id, data = True)\r\n for comment in comments:\r\n comment.body = self.rewrite_ob_urls(comment.body)\r\n comment._commit()", "def sanitize_comment(comment):\n\n if hasattr(settings, \"BLEACH_ALLOWED_TAGS\"):\n allowed_tags = settings.BLEACH_ALLOWED_TAGS\n else:\n allowed_tags = bleach.sanitizer.ALLOWED_TAGS\n\n return bleach.clean(comment, tags=allowed_tags, strip=True)", "def addPost(self,text,id,url,date):\n self.topComments.append(Post(text,id,url,date))\n return None", "def run_get_post(m):\n\n doc = get_doc(m)\n assert doc is not None\n\n wp = get_wp(m)\n\n post = find_post(wp, doc.identifier)\n\n if post:\n post.content = \"…content elided…\"\n from pprint import pprint\n pprint(post.struct)\n return\n else:\n warn(f\"Didn't find post for identifier {doc.identifier}\")\n return", "def fParseHTMLComments(self, match):\n before, commenttext, after = match.groups()\n commenttext = self.shelve(commenttext)\n return '<!--%s-->' % commenttext", "def test_comments(self):\n\n comment_str = \"# This is a comment\\n# This is another comment\"\n doc = parser.parse(comment_str)\n\n self.assertEqual(len(doc.children()), 2)" ]
[ "0.7271946", "0.63812876", "0.62457097", "0.606756", "0.59673697", "0.58122927", "0.5781495", "0.5712194", "0.55278826", "0.54838717", "0.5460556", "0.5456942", "0.5434908", "0.5390793", "0.53862107", "0.5372897", "0.53674394", "0.5357156", "0.5336541", "0.53325605", "0.53119266", "0.5298221", "0.5293657", "0.52386034", "0.522429", "0.51947874", "0.5162473", "0.5162208", "0.5145462", "0.5137305" ]
0.65297025
1
Parse the given text using the markup parser defined in `settings.MARKUP_PARSER` (or `limited.markup.pygtile`, if none was set).
def parse(*args, **kwargs):\n    if hasattr(settings, 'MARKUP_PARSER'):\n        parser = settings.MARKUP_PARSER\n        if not callable(parser):\n            parser = MARKUP_PARSERS.get(parser, 'textile')\n    else:\n        parser = MARKUP_PARSERS['textile']\n    try:\n        return mark_safe(parser(*args, **kwargs))\n    except Exception as e:\n        raise RuntimeError(str(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(text):\n md = markdown.Markdown(['codehilite', 'tables', ])\n\n for iref in re.findall(img_ref_re, text):\n img_id = iref[7]\n try:\n image = FlatPageImage.objects.get(pk=int(img_id))\n md.references[img_id] = (image.image_path.url, '')\n except ObjectDoesNotExist:\n pass\n\n for lref in re.findall(reference_re, text):\n doc_name = lref[7]\n try:\n doc = File.objects.get(name=doc_name)\n md.references[doc_name]= (doc.url, doc.name)\n except ObjectDoesNotExist:\n pass\n\n return md.convert(text)", "def parse_text(self, text):\n self._text_paragraph = text.split(\"\\n\")\n self._render()", "def parselite(text):\n p = BaseParser()\n return p.parse(text)", "def parse(self, text: str) -> Tree:\n return self.parser.parse(text)", "def parse(self, text, html=True):\r\n self._urls = []\r\n self._users = []\r\n self._lists = []\r\n self._tags = []\r\n\r\n reply = REPLY_REGEX.match(text)\r\n reply = reply.groups(0)[0] if reply is not None else None\r\n\r\n parsed_html = self._html(text) if html else self._text(text)\r\n return ParseResult(self._urls, self._users, reply,\r\n self._lists, self._tags, parsed_html)", "def __call__(self, parser, text):\n try:\n nltk_parse_tree = parser(text)\n\n self._evaluate(nltk_parse_tree)\n except:\n self.player.respond(\"I'm sorry, I don't understand.\")", "def parse_media_tags(id, text, files, markup):\n\n # SHOW [[label]]\n for tag in re.findall('\\[\\[[^\\]]+\\]\\]', text):\n ptag = parse_tag(id, tag, files, type=SHOW)\n if ptag:\n src = get_src(ptag, markup)\n if src:\n text = text.replace(tag, src)\n\n # LINK {{label}}\n for tag in re.findall('\\{\\{[^\\}]+\\}\\}', text):\n ptag = parse_tag(id, tag, files, type=LINK)\n if ptag:\n src = get_src(ptag, markup)\n if src:\n text = text.replace(tag, src)\n\n return text", "def from_text(text):\n return parse(text)", "def parse(self, text, start=None):\n return self.parser.parse(text, start=start)", "def parse(self):\n\n text = self.text.li\n\n # helper function to parse both BeautifulSoup tags and NavigableStrings\n def extract_text(x):\n if type(x).__name__ == \"NavigableString\":\n return x\n elif x.name == 'br':\n return '\\n'\n else:\n return x.get_text()\n\n # helper function to get text from a bullet, ignoring potential\n # sub-bullets or images\n def get_bullet_parts(q):\n parts = []\n for c in q.children:\n if c.name == 'ul':\n break\n elif c.name == 'div' and 'thumb' in c['class']:\n pass\n elif c.name == 'a' and 'class' in c.attrs and 'autonumber' in c['class']:\n pass\n else:\n parts.append(c)\n return parts\n\n def is_english(quote, quote_parts=None):\n # reject quotes not in latin alphabet\n alpha = 'abcdefghijklmnopqrstuvwzyz'\n spaceless = quote.replace(' ', '')\n if not len(spaceless):\n print(quote)\n return False\n prop_latin = sum(map(lambda x: x in alpha, spaceless.lower())) / len(spaceless)\n if prop_latin < .6:\n print(quote)\n return False\n\n # figure out whether quote is in italics\n textlen = len(quote)\n try:\n italiclen = len(''.join([extract_text(x) for x in quote_parts if x.name=='i']))\n except:\n italiclen = 0\n if italiclen + 5 > textlen:\n is_italic = True\n else:\n is_italic = False\n\n is_en_list = [en_dict.check(s.strip('\\'\"(){}[].?!-—’,<>')) for s in quote.split() if len(s.strip('\\'\"(){}[].?!-—’,<>'))]\n en_proportion = (sum(is_en_list)+2)/len(is_en_list)\n if en_proportion > .6 and not is_italic:\n return True\n elif en_proportion > .8 and is_italic:\n return True\n else:\n print(quote)\n return False\n\n\n # get sub-bullets which might include source name\n 
meta_info = text.ul\n quote_parts = get_bullet_parts(text)\n try:\n quote = ''.join(map(extract_text, quote_parts)).strip()\n # quote in foreign language, try next subbullet\n if not is_english(quote, quote_parts):\n if meta_info:\n old_quote = quote\n bullets = meta_info.find_all('li')\n quote_parts = get_bullet_parts(bullets[0])\n quote = ''.join(map(extract_text, quote_parts)).strip()\n # check if subbullet seems to be in english\n if is_english(quote, quote_parts) and len(quote) > len(old_quote)*.6:\n badwords = ['pp.', 'p.', 'ch.', 'chapter', 'page', 'chap.', 'act', 'book']\n if sum([quote.lower().startswith(b) for b in badwords]) > 0:\n self.invalid = True\n else:\n self.quote = quote\n if len(bullets) > 1:\n source_parts = get_bullet_parts(bullets[1])\n self.potential_source = ''.join(map(extract_text, source_parts)).strip()\n else:\n self.invalid = True\n else:\n self.invalid = True\n print(\"foreign with no meta-info:\", quote)\n else:\n self.quote = quote\n if meta_info:\n source_parts = get_bullet_parts(meta_info.li)\n self.potential_source = ''.join(map(extract_text, source_parts)).strip()\n # try to catch things like chapter headings that get through from bad parses\n badwords = ['p.', 'pp.', 'ch.', 'chapter', 'page', 'chap.']\n if len(quote) < 25 and sum([(b in quote.lower().split()) for b in badwords]) > 0:\n self.invalid = True\n if ('\\\\displaystyle' in quote):\n self.invalid = True\n badwords = ['pp.', 'p.', 'ch.', 'chapter', 'page', 'chap.', 'act', 'book']\n if self.potential_source and sum([self.potential_source.lower().startswith(b) for b in badwords]) > 0:\n self.potential_source = None\n except Exception as e:\n print(e)\n print(quote_parts, meta_info)\n self.invalid = True", "def _text(self, text):\r\n URL_REGEX.sub(self._parse_urls, text)\r\n USERNAME_REGEX.sub(self._parse_users, text)\r\n LIST_REGEX.sub(self._parse_lists, text)\r\n HASHTAG_REGEX.sub(self._parse_tags, text)\r\n return None", "def parse_text(self, page):\n text = page.find(self.tag_prefix + self.revision_tag).find(self.tag_prefix + self.text_tag).text\n title = page.find(self.tag_prefix + self.title_tag).text\n categories = []\n #\n text = self.parse_archivo(text)\n text = self.parse_foto(text)\n text = self.parse_by_line(text)\n text = self.parse_link(text)\n text = self.parse_url(text)\n text = self.parse_fecha(text)\n text = self.parse_bracketed_word(text)\n #\n if text:\n categories = re.findall(self.category_finder_regex, text)\n #\n text = self.parse_category(text)\n text = self.parse_other_language(text)\n text = self.parse_table_regex(text)\n text = self.parse_ver_fuente(text)\n text = self.remove_extra_text(text)\n text = self.remove_extra_characters(text)\n\n categorias = []\n for cat in categories:\n categorias.append(cat[6])\n\n if text:\n if 'REDIRECT' in text or 'redirect' in text:\n return None\n\n return Article(title=title, content=text, categories=categorias)", "def parse(self, text, style):\r\n # AR 20040612 - when we feed Unicode strings in, sgmlop\r\n # tries to coerce to ASCII. Must intercept, coerce to\r\n # any 8-bit encoding which defines most of 256 points,\r\n # and revert at end. Yuk. 
Preliminary step prior to\r\n # removal of parser altogether.\r\n enc = self._enc = 'utf8' #our legacy default\r\n self._UNI = type(text) is UnicodeType\r\n if self._UNI:\r\n text = text.encode(enc)\r\n\r\n self._setup_for_parse(style)\r\n # the xmlparser requires that all text be surrounded by xml\r\n # tags, therefore we must throw some unused flags around the\r\n # given string\r\n if not(len(text)>=6 and text[0]=='<' and _re_para.match(text)):\r\n text = \"<para>\"+text+\"</para>\"\r\n self.feed(text)\r\n self.close() # force parsing to complete\r\n return self._complete_parse()", "def _html(self, text):\r\n html = URL_REGEX.sub(self._parse_urls, text)\r\n html = USERNAME_REGEX.sub(self._parse_users, html)\r\n html = LIST_REGEX.sub(self._parse_lists, html)\r\n return HASHTAG_REGEX.sub(self._parse_tags, html)", "def unhtml(cls, text):\n parser = cls()\n parser.feed(text)\n return parser", "def formatMarkup(request, text, currentStack=[]):\n try:\n currentStack.index(text)\n raise Exception(\"Formatting a text that is being formatted?!\")\n except ValueError:\n pass\n currentStack.append(text)\n\n from MoinMoin.parser.wiki import Parser\n from MoinMoin.formatter.text_html import Formatter\n import StringIO\n\n origtext = text\n out = StringIO.StringIO()\n request.redirect(out)\n parser = Parser(text, request, line_anchors=False)\n formatter = Formatter(request, terse=True)\n reqformatter = None\n if hasattr(request, 'formatter'):\n reqformatter = request.formatter\n request.formatter = formatter\n p = Page(request, \"$$$$i18n$$$$\")\n formatter.setPage(p)\n parser.format(formatter)\n text = out.getvalue()\n if reqformatter == None:\n del request.formatter\n else:\n request.formatter = reqformatter\n request.redirect()\n del currentStack[-1]\n text = text.strip()\n return text", "def _parse_markdown(self):\n renderer = MyRenderer()\n md = mistune.Markdown(renderer=renderer)\n md.render(self._markdown_text)\n self._bash_commands = renderer._bash_commands", "def __init__(\n self, text: str = \"\", path: Optional[pathlib.Path] = None, **kwargs,\n ):\n if text == \"\" and path:\n text = path.read_text()\n\n text_with_no_leading_spaces = \"\\n\".join([line.lstrip() for line in text.splitlines()])\n\n text_html = markdown.markdown(\n text_with_no_leading_spaces, extensions=MARKDOWN_EXTENSIONS, output_format=\"html5\"\n )\n\n super().__init__(text_html, **kwargs)", "def parse(\n self, text: str, labels: istr = None, pipeline: str = \"default\"\n ) -> Doc:", "def parse(self, text):\n parse_results = self._parse(text)\n if self.resolve_corefs:\n self._resolve_corefs(parse_results)\n return parse_results", "def tokenize_html(self, path):\n with open(path, errors=u'ignore') as f:\n soup = BeautifulSoup(f, u'lxml')\n if soup.title:\n self.title = soup.title.text\n junk = [u'head', u'script', u'style']\n for e in soup(junk):\n e.decompose()\n text = soup.get_text(separator=u' ')\n self.tokenize(text)\n if self._config[u'index_urls']:\n self.tokenize_href(soup)", "def parse_text(text=None, file=None):\n if not text:\n text = open(file).readlines()\n parsed_text = re.split(ARTICLE_TOKEN, text)\n return parsed_text", "def parse(self, src, **opts):\n if not is_string(src):\n raise TypeError(\"input must be a string\")\n\n self.input = src\n scanner = markup_scanner(src)\n if opts.get('skip_white_text', False):\n self.objects = list(\n t for t in scanner.scan()\n if t[0] != 'text' or not src[t[1]:t[2]].isspace())\n else:\n self.objects = list(scanner.scan())\n self.wrappers = self.make_wrappers()\n 
self.__linepos = []\n self.find_partners()", "def textile(text, **kwargs):\n from django.contrib.markup.templatetags.markup import textile\n return textile(text)", "def parse(intLanguageName, content, formatDetails, threadstop):\r\n\r\n if len(content) == 0:\r\n return buildSyntaxNode([], 0, \"text\")\r\n\r\n if formatDetails.noFormat:\r\n return buildSyntaxNode([buildSyntaxNode(content, 0, \"plainText\")],\r\n 0, \"text\")\r\n\r\n baseDict = _buildBaseDict(formatDetails=formatDetails)\r\n\r\n## _prof.start()\r\n try:\r\n print content\r\n print baseDict\r\n t = text.parseString(content, parseAll=True, baseDict=baseDict,\r\n threadstop=threadstop)\r\n print t\r\n t = buildSyntaxNode(t, 0, \"text\")\r\n print t\r\n\r\n finally:\r\n## _prof.stop()\r\n pass\r\n\r\n return t", "def simple_parse(self, text_or_tokens):\n parses = self.parse(text_or_tokens)\n return str(parses[0].ptb_parse)", "def htmltomarkdown(text):\n\n try:\n content = html2markdown.convert(text)\n except Exception as exc:\n logger.warning(f\"error={exc};text={text[:100]}\")\n # Return escaped text\n content = html.escape(text)\n\n return content", "def __init__(self, txt):\n self.dirty_html = txt\n self.cleaned_html = ''\n self.current_parent_element = {}\n self.current_parent_element['tag'] = ''\n self.current_parent_element['attrs'] = {}\n self.parsing_li = False\n\n HTMLParser.__init__(self)", "def parse(self, text, debug=0):\n if not isinstance(text, basestring):\n raise TypeError('%r is not a string.' % (text,))\n results = self.parse_tokens(self.tokenize(text), debug=debug)\n if len(results) > 1:\n results = utils.remove_duplicates(results)\n for result in results:\n result.text = text\n return results", "def parse(self, text_to_parse):\r\n simple_sentence, kana_sentence = self.get_sentences(text_to_parse)\r\n parts = self.build_parts(simple_sentence, kana_sentence)\r\n return KanaSentence(parts)" ]
[ "0.63776726", "0.6272165", "0.61974937", "0.61344326", "0.6089678", "0.6000796", "0.5909587", "0.590035", "0.5859011", "0.58447737", "0.58194363", "0.5794619", "0.57134396", "0.5710515", "0.57059056", "0.56801045", "0.56688005", "0.5572823", "0.5505512", "0.5502471", "0.5495942", "0.5491047", "0.5465237", "0.54066855", "0.53978586", "0.537991", "0.5333984", "0.53328896", "0.53211296", "0.53036106" ]
0.6886756
0
Class method. Instantiates several synapses with various dimensions.
def instantiate(cls, dim_list, **kwargs):\n    instances = (cls(dim=dim, **kwargs) for dim in dim_list)\n    syn_dict = {str(inst.dim):inst for inst in instances}\n    print("Synapses instanciated: {}".format(syn_dict.keys()))\n    return syn_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_synapses(self):\n pass # Ignore if child does not implement.", "def __init__(self, center, leads, connections):\n\n if not center.is_square():\n raise ValueError(\"Center is not a square TightBinding\")\n\n self.center = center.copy()\n self.dims = center.dims\n self.leads = []\n self.connections = []\n\n for l,c in zip(leads,connections):\n self.append_lead(l,c)", "def __init__(self, num_synapses=0, weights=0.0, delays=1,\n connection_array=None):\n self._num_synapses = num_synapses\n self._weights = weights\n self._delays = delays\n self._connection_array = connection_array", "def __init__(self, input_dims, output_dims):\n # Dimension attributes\n # Note that the tuples of input and output dims are ordered\n # from least-significant to most-significant subsystems\n self._qargs = None # qargs for composition, set with __call__\n self._input_dims = None # tuple of input dimensions of each subsystem\n self._output_dims = None # tuple of output dimensions of each subsystem\n self._input_dim = None # combined input dimension of all subsystems\n self._output_dim = None # combined output dimension of all subsystems\n self._set_dims(input_dims, output_dims)", "def add_synapses(self, obj):\n synapse_ct = lems.ComponentType('Synapse')\n dynamics_synapse = lems.Dynamics()\n synapse_ct.add(dynamics_synapse)", "def connect_synfluct(self, PF_BG_rate=6, PF_BG_cv=1, STL_BG_rate=20, STL_BG_cv=1):\n \n if self.do_run:\n \n for m in self.ST_stims:\n del m \n del self.ST_stims\n \n for m in self.PF_stims:\n del m \n del self.PF_stims\n \n self.ST_stims = []\n self.PF_stims = []\n \n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n PF_syn_list = self.cells[n][i].createsyn_PF()\n \n for d in PF_syn_list:\n d.input.newnetstim.number = 1e9\n d.input.newnetstim.noise = PF_BG_cv\n d.input.newnetstim.interval = 1000.0 / PF_BG_rate\n d.input.newnetstim.start = 0\n \n self.PF_stims.append(PF_syn_list)\n \n ST_stim_list = self.cells[n][i].createsyn_ST(record_all=0)\n\n for d in ST_stim_list:\n d.newnetstim.number = 1e9\n d.newnetstim.noise = STL_BG_cv\n d.newnetstim.interval = 1000.0 / STL_BG_rate\n d.newnetstim.start = 0\n \n self.ST_stims.append(ST_stim_list)\n \n if self.id == 0: print \"- PF and ST stimulation added.\"", "def buildSystem(self, shape ):\n\t\tfor s in self.scatters:\n\t\t\tfor i,n in enumerate( s._nodes ):\n\t\t\t\tsoftMod = sf.SoftModCluster( 'lip_' + '_%i'%i + '_SFM', shape )\n\t\t\t\tsoftMod.create( n.a.t.v[0] )", "def initialize(self,t0=0.0):\n \n # An connection_distribution_list (store unique connection(defined by weight,syn,prob))\n self.connection_distribution_collection = ConnectionDistributionCollection() # this is \n self.t = t0\n \n # put all subpopulation and all connections into the same platform\n for subpop in self.population_list:\n subpop.simulation = self\n for connpair in self.connection_list:\n connpair.simulation = self\n \n \n \n # initialize population_list, calculate \n \n \n for p in self.population_list:\n p.initialize() # 2 \n \n for c in self.connection_list:\n print 'initialize population'\n c.initialize() # 1", "def _build_topology(self):\n\t\t# childSection.connect(parentSection, [parentX], [childEnd])\n\t\tfor i in 
range(self._axonNodes-1):\n\t\t\tself.node[i].connect(self.mysa[2*i],0,1)\n\t\t\tself.mysa[2*i].connect(self.flut[2*i],0,1)\n\t\t\tself.flut[2*i].connect(self.stin[6*i],0,1)\n\t\t\tself.stin[6*i].connect(self.stin[6*i+1],0,1)\n\t\t\tself.stin[6*i+1].connect(self.stin[6*i+2],0,1)\n\t\t\tself.stin[6*i+2].connect(self.stin[6*i+3],0,1)\n\t\t\tself.stin[6*i+3].connect(self.stin[6*i+4],0,1)\n\t\t\tself.stin[6*i+4].connect(self.stin[6*i+5],0,1)\n\t\t\tself.stin[6*i+5].connect(self.flut[2*i+1],0,1)\n\t\t\tself.flut[2*i+1].connect(self.mysa[2*i+1],0,1)\n\t\t\tself.mysa[2*i+1].connect(self.node[i+1],0,1)", "def connect_stim(self):\n self.stim = h.NetStim()\n self.stim.number = self.stim_number\n self.stim.start = 9\n self.ncstim = h.NetCon(self.stim, self.cells[0].synlist[0])\n self.ncstim.delay = 1\n self.ncstim.weight[0] = self.stim_w # NetCon weight is a vector.", "def __init__(self, dims):\n\t\tself.layersNumber = len(dims) - 1\n\t\tself.weights = []\n\t\tself.biases = []\n\t\tnp.random.seed(42)\n\t\tfor d in range(self.layersNumber):\n\t\t\tself.weights.append(np.random.randn(dims[d+1], dims[d]))\n\t\t\tself.biases.append(np.random.randn(dims[d+1], 1))", "def __init__(self, shape, dtype = 'd'):\n self.shape = shape\n self.dtype = dtype\n \n ncell = int(np.prod(self.shape))\n self.shared_array_base = Array(dtype, ncell,lock=False) \n pass", "def setup_class(self):\n self.g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3)\n self.g2 = models.Gaussian1D(10, mean=13, stddev=0.4)\n self.jf = JointFitter(\n [self.g1, self.g2], {self.g1: [\"amplitude\"], self.g2: [\"amplitude\"]}, [9.8]\n )\n self.x = np.arange(10, 20, 0.1)\n y1 = self.g1(self.x)\n y2 = self.g2(self.x)\n\n with NumpyRNGContext(_RANDOM_SEED):\n n = np.random.randn(100)\n\n self.ny1 = y1 + 2 * n\n self.ny2 = y2 + 2 * n\n self.jf(self.x, self.ny1, self.x, self.ny2)", "def __init__(self):\n # Call parent initialisers\n # SecmUtilityCore.__init__(self)\n Node.__init__(self, \"vehicle_sim\")\n # super().__init__('vehicle_sim')\n\n self.vehicle_marker_array = MarkerArray()\n self.vehicle_marker = Marker()\n self.pose_msg = Pose()\n self.control_msg = Control()\n\n self.model = Model()\n\n # Create subscribers to listen to SECM output\n self.create_subscription(\n msg_type=Control,\n topic=\"/control\",\n callback=self.receive_control_msg,\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Create pose publisher\n self.pose_publisher = self.create_publisher(\n msg_type=Pose,\n topic=\"/pose\",\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Create marker publisher\n self.vehicle_marker_publisher = self.create_publisher(\n msg_type=Marker,\n topic=\"/vehicle_marker\",\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Setup timers to spin the execution loop. 
\n self.create_timer(1.0/30.0, self.execute)", "def _create_common_connections(self):\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\tfor connection in self._infoCommonMuscleConnections:\n\t\t\t\t# List of source cells ids\n\t\t\t\tsourcesId = self.cellsId[muscle][connection[0]]\n\t\t\t\t# gather the sources all together\n\t\t\t\tsourcesId = comm.gather(sourcesId,root=0)\n\t\t\t\tif rank==0: sourcesId = sum(sourcesId,[])\n\t\t\t\tsourcesId = comm.bcast(sourcesId,root=0)\n\t\t\t\t# List of taget cells ids\n\t\t\t\ttargetsId = self.cellsId[muscle][connection[1]]\n\t\t\t\t# Ratio of connection\n\t\t\t\tconRatio = connection[2]\n\t\t\t\t# Number of connections\n\t\t\t\tconNum = int(connection[3])\n\t\t\t\t# Weight of connections\n\t\t\t\tconWeight = float(connection[4])\n\t\t\t\t# Type of synapse\n\t\t\t\tsynType = connection[5]\n\t\t\t\t# connect sources to targets\n\t\t\t\tself._connect(sourcesId,targetsId,conRatio,conNum,conWeight,synType)", "def _build(self):\n samp, loc, cov = self.build_outputs()\n \n self.fill_oslot_with_tensor(0, samp)\n self.fill_oslot_with_tensor(1, loc)\n self.fill_oslot_with_tensor(2, cov)\n\n self._is_built = True", "def construct_network(self):\n r = 0\n n = self.nbr_0_splxs\n for k in range(n):\n self.splxs.append((0, (0, k)))\n self.nbr_splxs += 1\n r, edge = self.find_next_edge(r)\n # this while loop finds the new edge to treat and add it to the 1-splx list and then finds out if a 2-splx is created\n while edge != (-1, -1):\n # Add the new edge\n self.one_splxs.append((edge, self.nbr_splxs))\n self.splxs.append((1, self.nbr_1_splxs))\n self.nbr_1_splxs += 1\n self.nbr_splxs += 1\n self.dist_appearance.append(r)\n a, b = edge\n # find out if a 2-splx has been created\n for i in range(self.nbr_1_splxs - 1):\n c, d = self.one_splxs[i][0]\n if d == a:\n for j in range(i + 1, self.nbr_1_splxs - 1):\n e, f = self.one_splxs[j][0]\n if e == c and f == b:\n self.two_splxs.append((self.nbr_1_splxs - 1, i, j))\n self.splxs.append((2, self.nbr_2_splxs))\n self.nbr_2_splxs += 1\n self.nbr_splxs += 1\n self.dist_appearance.append(r)\n # find the next edge to treat\n r, edge = self.find_next_edge(r)\n print(\"Network created\")\n return ()", "def __init__(self, dimensions = (3,1)):\n self.dimensions = dimensions\n self.states = self.__init_states()\n self.actions = self.__init_actions()", "def __init__(self,\n mass_1, mass_2,\n width_1, width_2,\n x0_1, x0_2,\n v0_1=0, v0_2=0,\n h=0.1):\n self.box_1 = box.box(mass_1, width_1, x0_1, v0_1)\n self.box_2 = box.box(mass_2, width_2, x0_2, v0_2)\n self.h = h\n self.coll_counter = 0", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')", "def __init__(self, direct=1):\n if not direct:\n self.physicsClient = p.connect(p.GUI)\n else:\n self.physicsClient = p.connect(p.DIRECT)\n \n #The max_dist variables define the size of the arena\n #n defines the number of agents in the world.\n self.max_dist_x = 4\n self.max_dist_y = 4\n self.n = 2\n\n #This defines the max force that can be applied\n #to the motor joints of our robot.\n self.maxForce = 30 \n \n self.observation_space = np.array([[1.0,1.0,1.0],[1.0,1.0,1.0]])\n\n #Action Space\n #For 
all the agents, the action space is same.\n #The first action is a continous action which determines the speed of the wheels.\n #The second action is a continous action which determines the angle by which the front wheel turns.\n self.action_space = np.array([spaces.Box(low=np.array([0,-0.5]), high=np.array([30,0.5]))]*self.n)\n self.reduce = 1\n self.step_counter=0", "def __init__( self, owner, shoulderindex, wristindex, ctrlindex=0 ):\n\t\tself.shoulder = ServoJoint( owner, shoulderindex, ctrlindex ) \n\t\tself.wrist = ServoJoint( owner, wristindex, ctrlindex )", "def __init__(self, sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [cp.array(cp.random.randn(y, 1)) for y in sizes[1:]]\n self.weights = [cp.array(cp.random.randn(y, x))\n for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, number_of_stools):\n self._stools = {}\n for i in range(number_of_stools):\n self._stools[i] = []\n self._move_seq = MoveSequence([])", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def __init__(self, solar, onshore, load, solar_df, onshore_df, load_df, n_clusters_dim_1, n_clusters_dim_2, batch_size, year_start):\n super().__init__(solar, onshore, load, solar_df, onshore_df, load_df)\n\n # self.solar = solar\n # self.onshore = onshore\n # self.load = load\n\n # self.np_data_list = [self.solar, self.onshore, self.load]\n\n self.solar_som_calculator = SOMCalculator(\n self.solar, n_clusters_dim_1, n_clusters_dim_2, batch_size)\n self.solar_som = self.solar_som_calculator.train_som()\n\n self.onshore_som_calculator = SOMCalculator(\n self.onshore, n_clusters_dim_1, n_clusters_dim_2, batch_size)\n self.onshore_som = self.onshore_som_calculator.train_som()\n\n self.load_som_calculator = SOMCalculator(\n self.load, n_clusters_dim_1, n_clusters_dim_2, batch_size)\n self.load_som = self.load_som_calculator.train_som()\n\n self.calculators = [self.solar_som_calculator,\n self.onshore_som_calculator, self.load_som_calculator]\n self.som_objects = [self.solar_som, self.onshore_som, self.load_som]\n\n self.year_start = year_start", "def __init__(self,\n num_masks=1,\n num_fake=0,\n epsilon_greedy=0.0,\n num_classes = 2,\n num_samples = 1000,\n max_length = 30,\n ambiguity=False,\n num_bondtypes=1):\n\n self.num_masks = num_masks\n self.num_fake = num_fake\n self.epsilon_greedy = epsilon_greedy\n self.num_classes = num_classes\n self.num_samples = num_samples\n\n self.molecule_generator = MoleculeGenerator(num_classes=num_classes, \n max_length=max_length, \n 
ambiguity=ambiguity,\n num_bondtypes=num_bondtypes)\n\n self.corruption = CorruptionTransform(num_masks=num_masks, num_fake=num_fake, epsilon=epsilon_greedy)\n\n\n self.data = []\n\n\n for i in range(self.num_samples):\n molecule = self.molecule_generator.generate_molecule()\n molecule = chem.AddHs(molecule)\n\n\n Adj = chem.rdmolops.GetAdjacencyMatrix(molecule)\n atoms = np.asarray([periodic_table[atom.GetAtomicNum()] for atom in molecule.GetAtoms()])\n smiles = chem.MolToSmiles(molecule)\n self.data += [MoleculeSample(atoms, Adj, {}, smiles)]", "def __init__(self, structure):\n # weight matrices\n self.ws = [np.random.randn(m, n) for n, m in zip([0] + structure, structure)]\n # biases\n self.bs = [np.random.rand(n, 1) for n in structure]\n # activations\n self.ys = [np.zeros((n, 1)) for n in structure]\n # z values\n self.zs = [np.zeros((n, 1)) for n in structure]", "def __init__(self, sizes):\r\n self.num_layers = len(sizes)\r\n self.sizes = sizes\r\n\r\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\r\n self.weights = [np.random.randn(y, x)\r\n for x, y in zip(sizes[:-1], sizes[1:])]", "def _setup(self, start_t: float, end_t: float):\n add_init_conditions = True\n delta_t = (end_t - start_t) / self._num_time_blocks\n for ndx in range(self._num_time_blocks):\n _start_t = delta_t * ndx\n _end_t = delta_t * (ndx + 1)\n (pyomo_model,\n start_states,\n end_states) = self.build_model_for_time_block(ndx=ndx,\n start_t=_start_t,\n end_t=_end_t,\n add_init_conditions=add_init_conditions)\n self._nlps[ndx] = nlp = InteriorPointInterface(pyomo_model=pyomo_model)\n assert len(start_states) == len(end_states)\n if self._num_states is not None:\n assert self._num_states == len(start_states)\n else:\n self._num_states = len(start_states)\n\n self._link_forward_matrices[ndx] = self._build_link_forward_matrix(nlp, ndx, end_states)\n self._link_forward_coupling_matrices[ndx] = self._build_link_forward_coupling_matrix(ndx)\n self._link_backward_matrices[ndx] = self._build_link_backward_matrix(nlp, ndx, start_states)\n self._link_backward_coupling_matrices[ndx] = self._build_link_backward_coupling_matrix(ndx)\n\n add_init_conditions = False" ]
[ "0.65790576", "0.6057899", "0.59684527", "0.595147", "0.594852", "0.5947397", "0.59022313", "0.5834885", "0.57945764", "0.5773133", "0.5739251", "0.57297397", "0.5662556", "0.5654211", "0.5625765", "0.5613609", "0.56102866", "0.56065047", "0.5592228", "0.5544771", "0.55325526", "0.5523166", "0.55091494", "0.55084956", "0.54989386", "0.5497842", "0.549742", "0.5493646", "0.54872835", "0.5474294" ]
0.6679907
0
Class method. Loads a particular synapse which has been previously saved.
def retrieve_synapse(cls, synindex):\n    try :\n        synapses_register = load_register('all_syn')\n        # Retrieving the desired attributed\n        attrs = synapses_register[synapses_register["Index"]==synindex].drop(['Index'], axis=1).to_dict('records')[0]\n        # Intanciation of the synapse:\n        syn = cls(**attrs)\n    except :\n        print("No synapse saved under this index.")\n        syn = None\n    finally :\n        return syn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_workflow( self, workflow_id ):\n id = self.app.security.decode_id( workflow_id )\n stored = self.app.model.context.query( self.app.model.StoredWorkflow ).get( id )\n return stored.latest_workflow", "def load_inst(self):\n self.sanity_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n self.pub_auth_all = pickle.load(open(fname_pub_auth_all, 'rb'))\n self.pub_auth_top = pickle.load(open(fname_pub_auth_top, 'rb'))\n self.pub_inst_all = pickle.load(open(fname_pub_inst_all, 'rb'))\n self.pub_inst_top = pickle.load(open(fname_pub_inst_top, 'rb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n self.history = pickle.load(open(fname_pub_history, 'rb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n self.staff = pickle.load(open(fname_pub_staff, 'rb'))", "def load_latest_save(self, device=None):\n return torch.load(str(self.previous_saves()[-1].absolute()), map_location=device)", "def LoadStationDefinition(cls, metadata_object):\n pass", "def load(self):\n return None", "def _load(self) -> None:\n self.record = self._saved_record\n self.counter = self._saved_counter\n self.current_objects = self._saved_objects", "def _load_restored(self, dataset_path):\n for group in ['knowledge', 'source', 'target']:\n if getattr(self, group + '_format') != 'none':\n text_data = load_restored(dataset_path, group + '.', ignore_file='vocab')[0]\n setattr(self, group + '_text_data', text_data)\n idx2token, token2idx = load_restored(dataset_path, ignore_file='data')\n setattr(self, 'idx2token', idx2token)\n setattr(self, 'token2idx', token2idx)\n self.max_vocab_size = len(self.idx2token)\n self.logger.info(\"Restore finished!\")", "def load(self,previous=True):\n\n\t\tincoming = pickle.load(open(self.filename,'rb'))\n\t\t#---reconstitute things that were bootstrapped\n\t\t#---we do not load spots because e.g. 
paths might have changed slightly in paths.yaml\n\t\tself.post = incoming.post\n\t\tself.groups = incoming.groups\n\t\tself.slices = incoming.slices\n\t\tself.vars = incoming.vars\n\t\tself.meta = incoming.meta\n\t\tself.calc = incoming.calc\n\t\tself.toc = incoming.toc\n\n\t\t#---retain the incoming workspace for comparison\n\t\tif previous: self.previous = incoming", "def load_station(fname):\n with open(fname, 'rb') as f:\n so = pickle.load(f)\n return so", "def load(self):\n with self.__lock:\n self._d.update(self.backend.load())\n log.debug(\"load: {}\".format(self.backend.filename))", "def load(self):\n return", "def load(self, sess, step=None):\n if step==None:\n ckpt_path = tf.train.latest_checkpoint(self.model.ckpt_dir)\n else:\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model-'+str(step))\n self.saver.restore(sess, ckpt_path)\n step = tf.train.global_step(sess, self.gstep)\n print('Load model at step {} from check point {}.'.format(step, ckpt_path))", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word = pickle.load(open(self.save_file, 'rb'))", "def update_Synapse(self, synapse,\n N = None, NHP = None,\n morphology = None,\n arborization = None,\n data_source = None):\n self._database_writeable_check()\n connect_DataSource = self._default_DataSource if data_source is None \\\n else self._get_obj_from_str(data_source)\n if connect_DataSource is None:\n raise TypeError('Default DataSource is missing.')\n\n synapse = self._get_obj_from_str(synapse)\n if isinstance(synapse, [models.Synapse, models.InferredSynapse]):\n synapse_to_update = synapse\n if safe:\n if not self._is_in_datasource(connect_DataSource, synapse):\n raise DataSourceError(\n 'The synapse specified {} is not owned by the DataSource {}'.format(\n synapse.uname, connect_DataSource.name))\n elif isinstance(synapse, str):\n synapse_name = synapse\n try:\n synapse_to_update = self.get('Synapse', synapse_name, connect_DataSource)\n except RecordNotFoundError:\n synapse_to_update = self.get('InferredSynapse', synapse_name, connect_DataSource)\n else:\n raise TypeError('Parameter synapse must be either a str or a Synapse object.')\n\n synapse_info = copy.deepcopy(synapse_to_update.get_props())\n if isinstance(N, int):\n synapse_info['N'] = N\n elif N is not None:\n raise TypeError('N must be of integer type.')\n\n if isinstance(NHP, int):\n if NHP > synapse_info[\"N\"]:\n raise ValueError('NHP cannot be greater than N')\n synapse_info['NHP'] = NHP\n elif NHP is not None:\n raise TypeError('NHP must be of integer type.')\n\n synapse_to_update.update(**synapse_info)\n\n q_synapse = QueryWrapper.from_objs(self.graph, synapse_to_update)\n\n if arborization is not None:\n arborization_data = get_data(q_synapse, data_types = 'ArborizationData')\n if len(arborization_data):\n self._remove_by_rids(arborization_data.rids)\n self.add_synapse_arborization(synapse_to_update, arborization, data_source = data_source)\n\n if morphology is not None:\n if not isinstance(morphology, list):\n morphology = [morphology]\n morphology_types_to_update = [m['type'] for m in morphology]\n morphology_data = get_data(q_synapse, data_types = 'MorphologyData')\n if len(morphology_data):\n nodes_to_remove = [m._id for m in morphology_data.node_objs if m.type in morphology_types_to_update]\n self._remove_by_rids(nodes_to_remove)\n self.add_morphology(synapse_to_update, morphology, 
data_source = data_source)\n return True", "def load(self):\n return self._load", "def load(self, epoch='best'):\n self.logger.info(f'Using model loaded from {self.config[\"squad_model_path\"].format(epoch)}')\n\n state_dict = torch.load(self.config[\"squad_model_path\"].format(epoch))\n self.qa_module.load_state_dict(state_dict)", "def load(self):\n self._really_load()", "def load(self):\n raise NotImplementedError", "def load(self):\n raise NotImplementedError", "def save(self):\n if self.loaded:\n list_embeddingNames = [self.embeddings.vsm_name, self.synset_embeddings.vsm_name, self.imagined_embeddings.vsm_name]\n full_file_name = self.resource_manager.get_multimodal_dataset(self.corpus, list_embeddingNames)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')", "def load(self):\n self._load()", "def load_from_mysql(self):\n\n self._logger.info('Reading data from MySQL database')\n\n # open the database connection\n data = mysql_data.database(self.dataConfig['user'],\n self.dataConfig['password'],\n self.dataConfig['host'],\n self.dataConfig['database'],\n self.dataConfig['port'])\n\n # ---------------------------------------------------\n # determine if it's stations or client\n sta = self.stations\n\n c = None\n stable = None\n if 'client' in self.dataConfig.keys():\n c = self.dataConfig['client']\n stable = self.dataConfig['station_table']\n\n # Determine what table for the metadata\n mtable = self.dataConfig['metadata']\n\n # Raise an error if neither stations or client provided\n if (sta is None) & (c is None):\n raise Exception('Error in configuration file for [mysql],'\n ' must specify either \"stations\" or \"client\"')\n\n self._logger.debug('Loading metadata from table %s' % mtable)\n\n # ---------------------------------------------------\n # load the metadata\n self.metadata = data.metadata(mtable, station_ids=sta,\n client=c, station_table=stable)\n\n self._logger.debug('%i stations loaded' % self.metadata.shape[0])\n\n # ---------------------------------------------------\n # get a list of the stations\n station_ids = self.metadata.index.tolist()\n\n # get the correct column names if specified, along with variable names\n db_var_names = [val for key, val in self.dataConfig.items()\n if key not in self.db_config_vars]\n variables = [x for x in self.dataConfig.keys()\n if x not in self.db_config_vars]\n\n # get the data\n\n dp = data.get_data(self.dataConfig['data_table'], station_ids,\n self.start_date, self.end_date, db_var_names)\n\n # go through and extract the data\n for v in variables:\n setattr(self, v, dp[self.dataConfig[v]])", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _LongformerLoader\n jModel = _LongformerLoader(folder, spark_session._jsparkSession)._java_obj\n return LongformerEmbeddings(java_model=jModel)", "def load(self):\n raise NotImplementedError()", "def load(self):\n raise NotImplementedError()" ]
[ "0.61228395", "0.54798925", "0.5319807", "0.5294654", "0.52732694", "0.52658606", "0.5130691", "0.5092706", "0.50370383", "0.5030796", "0.50277567", "0.50262105", "0.5024152", "0.5024152", "0.5024152", "0.5024152", "0.50175834", "0.5012333", "0.4983431", "0.49762696", "0.49731192", "0.49716312", "0.49716312", "0.49533147", "0.49511", "0.49422753", "0.4930606", "0.4910649", "0.4887926", "0.4887926" ]
0.64493716
0
Loads a particular saved response. It fills the dictionary attribute "resp" with different components of the simulation. See the attribute "resp" for more details.
def retrieve_response(self, respindex): try : # Unpacking the data in the .resp attribute: self.indexes['resp'] = respindex path_dir = path_directory('resp', self) self.resp['coords'] = pd.read_csv(os.path.join(path_dir, 'coords.csv'), index_col=0).to_numpy() coords_ref = create_coords_ref(self.resp['coords']) self.resp['glus'] = tuple(pd.read_csv(os.path.join(path_dir, 'resglu{}.csv'.format(ref)), index_col=0) for ref in coords_ref) self.resp['AMPAtot'] = pd.read_csv(os.path.join(path_dir, 'resAMPAtot.csv'), index_col=0) self.resp['V'] = pd.read_csv(os.path.join(path_dir, 'resV.csv'), index_col=0) print("Response retrieved.") except FileNotFoundError: print("No response saved under this index.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_response(self):\n self.indexes['resp'] = attribute_index('resp', self)\n # Checking if the attribute \"resp\" is not empty:\n if not type(self.resp['coords']) == np.ndarray:\n print(\"Response is empty. Please run a simulation.\")\n # Checking if the target response has already been registered:\n elif self.indexes['resp'] == None:\n # Registering the synapse if necessary:\n self.indexes['syn'] = register_instance('syn', self)\n # Registering the response and setting its new index:\n self.indexes['resp'] = register_instance('resp', self)\n create_directory('resp', self)\n # Exporting the contents of the attribute \"resp\" to csv files:\n path_dir = path_directory('resp', self)\n coords_ref = create_coords_ref(self.resp['coords'])\n pd.DataFrame(self.resp['coords']).to_csv(os.path.join(path_dir, 'coords.csv'))\n for i in range(len(coords_ref)) :\n self.resp['glus'][i].to_csv(os.path.join(path_dir, 'resglu{}.csv'.format(coords_ref[i])), header=True)\n self.resp['AMPAtot'].to_csv(os.path.join(path_dir, 'resAMPAtot.csv'), header=True)\n self.resp['V'].to_csv(os.path.join(path_dir, 'resV.csv'), header=True)\n print(\"Saved: response at index {} for synapse {}.\".format(self.indexes['resp'], self.indexes['syn']))\n else:\n print(\"Response already registered at index {} for synapse {}.\".format(self.indexes['resp'], self.indexes['syn']))", "def loadMotorResponses(subj,hand='Right'):\n \n hands = {'Left':[0,1],'Right':[2,3]}\n\n x = tgp.loadTaskTiming(subj,'ALL')\n stimIndex = np.asarray(x['stimIndex'])\n ind = np.where(stimIndex=='motorResponse')[0]\n \n datadir = projectdir + 'data/postProcessing/hcpPostProcCiric/'\n h5f = h5py.File(datadir + subj + '_glmOutput_data.h5','r')\n data = h5f['taskRegression/ALL_24pXaCompCorXVolterra_taskReg_betas_canonical'][:].copy()\n data = data[:,ind].copy()\n h5f.close()\n \n # Isolate hand responses\n hand_ind = hands[hand]\n tmpdat = np.zeros((data.shape[0],2))\n if hand=='Right':\n #tmpdat[:,0] = data[:,3] #rmid -- need to flip this once glm is re-run -- check the new reffunc\n tmpdat[:,0] = np.real(data[:,2]) \n tmpdat[:,1] = np.real(data[:,3]) \n elif hand=='Left':\n tmpdat[:,0] = np.real(data[:,0]) #lmid\n tmpdat[:,1] = np.real(data[:,1]) #lind\n data = tmpdat.copy()\n\n return data", "def store_response(resp, response_dict):\n if response_dict is not None:\n response_dict['status'] = resp.status\n response_dict['reason'] = resp.reason\n response_dict['headers'] = resp_header_dict(resp)", "def load_response(self, category):\n self.response = requests.get(f\"{self.settings.BASE_URL}/{category}\")\n if self.response.status_code == 200:\n self.response_info = self.response.json()", "def simulate_stimulation(self, patt):\n # Defining the response:\n self.identities['resp'] = identity_summary('resp', patt)\n respindex = attribute_index('resp', self)\n # Running the simulation if no response has been computed for this pattern:\n if respindex == None :\n print('Running the simulation. 
It may take some time.')\n self.resp['coords'], self.resp['glus'], self.resp['AMPAtot'], self.resp['V'] = execute_c_code(self, patt)\n print(\"Simulation completed.\")\n # Retrieving the existing response otherwise:\n else:\n print(\"Response already computed.\")\n self.retrieve_response(respindex)", "def readResponseFile(self):\n resp_filename = QFileDialog.getOpenFileName(self, \"Open Response File\", str(Path.home()), '') \n try:\n resp_file = open(resp_filename[0], 'r')\n except:\n print(\"Couldn't get any files from QFileDialog\")\n return\n\n try:\n response = readResponseArrayToResponse(resp_file.read().split('\\n'), resp_filename[0].split('/')[-1])\n except Exception as e:\n response = None\n print(\"Not a valid response file: {0}\".format(e))\n\n resp_file.close()\n\n if response is not None:\n self.setResponse(response)", "def response(self, response: Dict) -> None:\n\n if \"satisfied\" in response and response[\"satisfied\"]:\n if not response[\"solution_index\"] and not response[\"solution_index\"] == 0:\n raise RPMException(\n \"If you are satisfied with one of the solutions, please specify the index of the \"\n \"solution as 'solution_index'.\"\n )\n if not (0 <= response[\"solution_index\"] <= self._f_current.shape[0]):\n msg = (\n \"Solution index must range from 0 to number of objectives - 1 '{}'. \" \"Given solution index: '{}.\"\n ).format(self._f_current.shape[0], response[\"solution_index\"])\n raise RPMException(msg)\n else:\n if \"reference_point\" not in response:\n raise RPMException(\"New reference point information missing. Please specify it as 'reference_point'.\")\n else:\n validate_reference_point(response[\"reference_point\"], self._ideal, self._nadir)\n\n self._response = response", "def api_response():\n return load_fixture(\"smhi.json\", DOMAIN)", "def load_next_response(self):\n if self.response_info['next']:\n self.response = requests.get(self.response_info['next'])\n self.response_info = self.response.json()", "def simulate_response(self, documents):", "def program_response_fixture() -> dict[str, Any]:\n return cast(dict[str, Any], json.loads(load_fixture(\"program_response.json\")))", "def get_initial_response(self):\n json_response = self.testapp.get(\n '/learn/%s/data' % self.exploration_id)\n self.assertEqual(json_response.status_int, 200)\n self.assertEqual(json_response.content_type, 'application/json')\n\n response = json.loads(json_response.body)\n\n self.last_block_number = response['block_number']\n self.last_params = response['params']\n self.last_state_id = response['state_id']\n self.state_history = [self.last_state_id]\n self.assertEqual(response['state_history'], self.state_history)\n\n return response", "def get_response(self, sentence):\n user_message = ParserSentence().clean(sentence)\n data_here_api = HereApi().get_request(user_message)\n if not data_here_api:\n return {\n \"grandpy_error\": choice(grandpy_error)\n }\n else:\n data_wiki_api = WikiApi().get_description(user_message)\n if not data_wiki_api:\n return {\n \"grandpy_address\": choice(grandpy_response),\n \"address\": data_here_api[\"address\"],\n \"grandpy_descript\": \"\",\n \"descriptif\": choice(grandpy_no_description),\n \"lat\": data_here_api[\"lat\"],\n \"lng\": data_here_api[\"lng\"],\n \"apikey\": HERE_API_KEY\n }\n else:\n return {\n \"grandpy_address\": choice(grandpy_response),\n \"address\": data_here_api[\"address\"],\n \"grandpy_descript\": choice(grandpy_story),\n \"descriptif\": data_wiki_api,\n \"lat\": data_here_api[\"lat\"],\n \"lng\": 
data_here_api[\"lng\"],\n \"apikey\": HERE_API_KEY\n }", "def setResponse(self, response):\n if(Debug_Level==2):\n print'response =',response \n #removing the end line and splitting \n words = response.replace('\\'','').strip().split(',') #Stripping and Splitting \n\n if(len(words)>1):\n self.RC_COM = int(words[1])\n words2 = words[2].split(':')\n self.TrID = int(words2[0])\n self.RC = int(words2[1])\n self.parameters = words[3:len(words)]\n if(self.RC!=0 and Debug_Level==1):\n print 'Problem, Error code:', self.RC", "def respond(self, response):\n self.response = response", "def response(self, response: Dict) -> None:\n\n if \"reference_point\" not in response:\n msg = \"Reference point missing. Please specify a reference point as 'reference_point.\"\n raise RPMException(msg)\n else:\n validate_reference_point(response[\"reference_point\"], self._ideal, self._nadir)\n\n self._response = response", "def save_response(self, res) -> None:\n file = open(\"response_{}.json\".format(self.num_res), \"w\")\n file.write(str(res))\n file.close()", "def get_response(self, response, pack):\n\n pass", "def _Dynamic_Fetch(self, request, response):\n print \"Request:\"\n print (\"Request: {}\").format(request)\n response.set_content(self.mock_response_issue)\n response.set_statuscode(200)\n new_header = response.add_header()\n new_header.set_key('Content-type')\n new_header.set_value('application/json')\n\n response.set_finalurl(request.url)\n response.set_contentwastruncated(False)\n\n # allow to query the object after it is used\n # pylint: disable=attribute-defined-outside-init\n self.request = request\n self.response = response", "def get_sample_data(self):\n with open('/data/pollination-sample/ui.json') as f:\n content = f.read()\n resp = Response(content, mimetype='application/json')\n return resp", "def readresp(self, cmd):\n\t\tdata = self.read(22)\n\t\tresponse = data[0]\n\t\t#print \"laser response\", self.mylaser, response\n\t\tgstt.lstt_dacanswers[self.mylaser] = response\n\t\tcmdR = data[1]\n\t\tstatus = Status(data[2:])\n\t\tr.set('/lack/'+str(self.mylaser), response)\n\n\t\tif cmdR != cmd:\n\t\t\traise ProtocolError(\"expected resp for %r, got %r\"\n\t\t\t\t% (cmd, cmdR))\n\n\t\tif response != \"a\":\n\t\t\traise ProtocolError(\"expected ACK, got %r\"\n\t\t\t\t% (response, ))\n\n\t\tself.last_status = status\n\t\treturn status", "def test_store_single_response(self):\n self.my_survey.store_response(self.responses[0])\n self.assertIn('English', self.my_survey.responses)", "def __init__(self, response_dict={}):\n self.id = response_dict.get('id')\n self.name = response_dict.get('name')\n self.image_url = response_dict.get('imageUrl')\n self.subtype = response_dict.get('subtype')\n self.supertype = response_dict.get('supertype')\n self.ability = response_dict.get('ability')\n self.hp = response_dict.get('hp')\n self.retreat_cost = response_dict.get('retreatCost')\n self.number = response_dict.get('number')\n self.artist = response_dict.get('artist')\n self.rarity = response_dict.get('rarity')\n self.series = response_dict.get('series')\n self.set = response_dict.get('set')\n self.set_code = response_dict.get('setCode')\n self.types = response_dict.get('types')\n self.attacks = response_dict.get('attacks')\n self.weaknesses = response_dict.get('weaknesses')\n self.resistances = response_dict.get('resistances')", "def init_response(res_str=None, data=None):\n response = {}\n response[\"res_str\"] = \"\"\n response[\"res_data\"] = {}\n if res_str is not None:\n response[\"res_str\"] = res_str\n 
if data is not None:\n response[\"res_data\"] = data\n return response", "def print_resp(self, resp: dict):\n if \"details\" in resp:\n if isinstance(resp[\"details\"], str):\n self.write_string(resp[\"details\"])\n if isinstance(resp[\"details\"], Table):\n self.write_table(resp[\"details\"])\n\n if \"data\" in resp:\n for item in resp[\"data\"]:\n if not isinstance(item, dict):\n continue\n item_type = item.get(\"type\")\n if item_type == \"string\":\n self.write_string(item[\"data\"])\n elif item_type == \"table\":\n table = Table(None)\n table.set_rows(item[\"rows\"])\n self.write_table(table)\n elif item_type == \"error\":\n self.write_error(item[\"data\"])\n elif item_type == \"dict\":\n self.write_dict(item[\"data\"])\n\n if \"details\" not in resp and \"data\" not in resp:\n self.write_string(\"Response is not correct.\")", "def test_process_response(self):\n t = self.create_request_object()\n response_content = u\"\"\" <Response ReferenceNumber=\"82e942b0-48e8-4cf4-b299-51e2b6a89a1b\"\n InboundODMFileOID=\"\"\n IsTransactionSuccessful=\"1\"\n SuccessStatistics=\"Rave objects touched: Subjects=0; Folders=0; Forms=0; Fields=0; LogLines=0\" NewRecords=\"\">\n </Response>\n \"\"\"\n req = mock.Mock(requests.Request, text=response_content)\n response = t.result(req)\n self.assertTrue(isinstance(response, RWSResponse))", "def update_response(self, response):\r\n self.stri.update_response(response)", "def update_response(self, response):\r\n self.stri.update_response(response)", "def update_response(self, response):\r\n self.stri_ext.update_response(response)\r\n self.stri_int.update_response(response)", "def setup_response(self, system, location, definition, descriptor):\r\n pass" ]
[ "0.67248696", "0.56677526", "0.5657209", "0.56208336", "0.5567731", "0.5557515", "0.54890513", "0.54850507", "0.54704213", "0.5448182", "0.54024094", "0.5309183", "0.52602434", "0.5231793", "0.5204311", "0.51842964", "0.5177023", "0.5176254", "0.515962", "0.5152318", "0.5118795", "0.51022357", "0.5095564", "0.50860524", "0.50852555", "0.50675714", "0.50527775", "0.50527775", "0.5037819", "0.5026467" ]
0.6242779
1
Runs a simulation with the current instance. If the simulation has already been run previously, the response is retrieved in the attribute "resp".
def simulate_stimulation(self, patt): # Defining the response: self.identities['resp'] = identity_summary('resp', patt) respindex = attribute_index('resp', self) # Running the simulation if no response has been computed for this pattern: if respindex == None : print('Running the simulation. It may take some time.') self.resp['coords'], self.resp['glus'], self.resp['AMPAtot'], self.resp['V'] = execute_c_code(self, patt) print("Simulation completed.") # Retrieving the existing response otherwise: else: print("Response already computed.") self.retrieve_response(respindex)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_run_simulation_stores_result(self):\n sim = ss.Simulation()\n assert sim.results == []\n sim.run_simulation(10)\n assert sim.results != []\n assert len(sim.results) == 10", "def _simulation_run(model_instance, observations, actions, rewards):\r\n\r\n for observation, action, reward in zip(observations, actions, rewards):\r\n model_instance.observe(observation)\r\n model_instance.overrideActionChoice(action)\r\n model_instance.feedback(reward)\r\n\r\n return model_instance", "def run_simulator(self):\n\n self.update_settings()\n\n # Pass in the progress bar and the master so that the simulator can\n # update the progress bar and then refresh the screen when the progress\n # checkpoints are hit\n\n self.sim_results = self.sim.run(self.progress_bar, self.master)\n self.graph_results()", "def run_simulation(self):\n self.market.market_session(True)\n return \"\"", "def run_simulation(run):\n # Write the argument file used by metrosim.\n simulation = run.simulation\n metrosim_dir = settings.BASE_DIR + '/metrosim_files/'\n metrosim_file = '{0}execs/metrosim'.format(metrosim_dir)\n arg_file = (\n '{0}arg_files/simulation_{1!s}_run_{2!s}.txt'.format(metrosim_dir,\n simulation.id,\n run.id)\n )\n with open(arg_file, 'w') as f:\n database = settings.DATABASES['default']\n db_host = database['HOST']\n db_name = database['NAME']\n db_user = database['USER']\n db_pass = database['PASSWORD']\n log = metrosim_dir + 'logs/run_{}.txt'.format(run.id)\n tmp = metrosim_dir + 'output'\n stop = metrosim_dir + 'stop_files/run_{}.stop'.format(run.id)\n arguments = ('-dbHost \"{0}\" -dbName \"{1}\" -dbUser \"{2}\" '\n + '-dbPass \"{3}\" -logFile \"{4}\" -tmpDir \"{5}\" '\n + '-stopFile \"{6}\" -simId \"{7!s}\" -runId \"{8!s}\"'\n ).format(db_host, db_name, db_user, db_pass, log, tmp,\n stop, simulation.id, run.id)\n f.write(arguments)\n\n # Run the script 'prepare_run.py' then run metrosim then run the script \n # 'run_end.py'.\n # The two scripts are run with the run.id as an argument.\n prepare_run_file = settings.BASE_DIR + '/metro_app/prepare_run.py'\n build_results_file = settings.BASE_DIR + '/metro_app/build_results.py'\n log_file = (\n '{0}/website_files/script_logs/run_{1}.txt'.format(\n settings.BASE_DIR, run.id\n )\n )\n # Command looks like: \n #\n # python3 ./metro_app/prepare_results.py y\n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n # && ./metrosim_files/execs/metrosim\n # ./metrosim_files/arg_files/simulation_x_run_y.txt \n # && python3 ./metro_app/build_results.py y \n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n #\n # 2>&1 | tee is used to redirect output and errors to file.\n command = ('python3 {first_script} {run_id} 2>&1 | tee {log} && '\n + '{metrosim} {argfile} && '\n + 'python3 {second_script} {run_id} 2>&1 | tee {log}')\n command = command.format(first_script=prepare_run_file, run_id=run.id,\n log=log_file, metrosim=metrosim_file,\n argfile=arg_file,\n second_script=build_results_file)\n subprocess.Popen(command, shell=True)", "def run(self):\n self._display_sims(self._compute_sims())", "def run_single(self):\n self.run_sim_time(1)", "def testSimCompletes(self):\n sim = Simulation()\n self.assertEqual(25, sim.run_simple(1, 11, \"output\", 0.1, 2, 10))", "def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())", "def _simulate(self):\n # sample incident and update status of vehicles at new time t\n self.sim.t, self.time, type_, loc, prio, req_vehicles, func, dest = self.sim._sample_incident()\n\n 
self.sim._update_vehicles(self.sim.t, self.time)\n\n # sample dispatch time\n dispatch = self.sim.rsampler.sample_dispatch_time(type_)\n\n # keep track of minimum TS response time\n min_ts_response = np.inf\n\n # get target response time\n target = self.sim._get_target(type_, func, prio)\n\n # sample rest of the response time for TS vehicles\n for v in req_vehicles:\n if v == \"TS\":\n\n vehicle, estimated_time = self.sim._pick_vehicle(loc, v)\n if vehicle is None:\n turnout, travel, onscene, response = [np.nan]*4\n else:\n vehicle.assign_crew() # always full time in this case\n\n turnout, travel, onscene = self.sim.rsampler.sample_response_time(\n type_, loc, vehicle.current_station_name, vehicle.type, vehicle.current_crew,\n prio, estimated_time=estimated_time)\n\n response = dispatch + turnout + travel\n vehicle.dispatch(dest, self.sim.t + (response + onscene + estimated_time) / 60)\n\n # we must return a numerical value\n if np.isnan(response):\n response = self.worst_response\n\n if response < min_ts_response:\n min_ts_response = response\n\n return min_ts_response, target", "def run(self, steps):\n self.sim.run(steps)", "def simulate(self, **args):\n snr = ct.c_double * 3\n self.sim_params = {**self.sim_params, **args}\n snr = snr(*self.sim_params[\"snr\"])\n dec_param = decoder_param(self.sim_params[\"earlyTerm\"], self.sim_params[\"iterations\"], self.sim_params[\"decoding\"].encode(\"utf-8\"))\n ch_param = channel_param(self.sim_params[\"seed\"], snr, self.sim_params[\"channel\"].encode(\"utf-8\"))\n sim_param = simulation_param(self.sim_params[\"threads\"], self.sim_params[\"maxFrames\"], self.sim_params[\"fec\"], \"\".encode(\"utf-8\"))\n\n def sim_thread():\n self.sim_stop_flag.value = False\n\n self.lib.argtypes = (decoder_param, channel_param, simulation_param, sim_results_t, ct.c_bool)\n self.lib.simulate(\n dec_param,\n ch_param,\n sim_param, \n ct.byref(self.sim_results_struct),\n ct.byref(self.sim_stop_flag)\n )\n \n th_sim = threading.Thread(target=sim_thread)\n th_sim.start()", "def simulation_run_action(request, simulation):\n # Check that there is no run in progress for this simulation.\n running_simulations = SimulationRun.objects.filter(\n simulation=simulation\n ).filter(status__in=('Preparing', 'Running', 'Ending'))\n if not running_simulations.exists():\n # Create a SimulationRun object to keep track of the run.\n run_form = RunForm(request.POST)\n if run_form.is_valid():\n run = run_form.save(simulation)\n run_simulation(run)\n return HttpResponseRedirect(\n reverse('metro:simulation_run_view', args=(simulation.id, run.id,))\n )\n return HttpResponseRedirect(reverse(\n 'metro:simulation_run_list', args=(simulation.id,)\n ))", "def start_simulation(self):\n regime_name = str(self.regime_list.item(self._current_regime_index).text())\n self.statusLabel.setText(u\"simulating {}\".format(regime_name))\n self._logger.info(u\"Simulating: {}\".format(regime_name))\n\n self.actSimulate.setDisabled(True)\n self.shortRunSimulation.setEnabled(False)\n self.shortRunRegimeBatch.setEnabled(False)\n self.actExecuteRegimes.setDisabled(True)\n self.guiProgress = QtGui.QProgressBar(self)\n self.sim.simulationProgressChanged.connect(self.guiProgress.setValue)\n self.statusBar().addWidget(self.guiProgress)\n self.runSimulation.emit()", "def run(\n self, progress_bar: bool = False, **qutip_options: Any\n ) -> SimulationResults:\n return self._sim_obj.run(progress_bar=progress_bar, **qutip_options)", "def simulateOneTimeStep(self):\n\n self.susceptibleToInfected()\n 
self.infectedToRecovered()\n\n # add the new values of healthy/infected/recovered to the arrays keeping track\n SIR_t = np.array([self.getSusceptible(), self.getInfected(), self.getRecovered()])\n #update SIR time series\n self.SIR = np.concatenate([self.SIR, SIR_t[:,np.newaxis]], axis=1)\n\n # add the new snapshot of the simulation\n self.snapshots.append(self.getSpace().copy())", "def test_run_simulation_returns_nothing(self):\n sim = ss.Simulation()\n assert sim.run_simulation(10) is None", "def startSimulation(self):\n self.saveParameters()\n self.simulation.main()", "def make_simulation(self):\n pass", "def simulate(self, start='0', stop='86400', step='60', solver='rungekutta', args=[]):\n\t\tstart = str(parse_var_val(start, 's'))\n\t\tstop = str(parse_var_val(stop, 's'))\n\t\tstep = str(parse_var_val(step, 's'))\n\t\tsim_args = [\n\t\t\t'-override',\n\t\t\t'startTime='+start+',stopTime='+stop+',stepSize='+step,\n\t\t\t'-s', solver,\n\t\t\t'-f', self.init_out_fn,\n\t\t\t'-r', self.res_fn,\n\t\t\t]\n\t\tsp.call(['./'+self.model] + sim_args + args)", "def _simulation_replayer(hoverfly_instance: Hoverfly, request, _patch_env):\n # so that requests to hoverfly admin endpoint are not proxied :)\n session = requests.Session()\n session.trust_env = False\n\n filename = extract_simulation_name_from_request(request)\n\n # noinspection PyTypeChecker\n with open(get_simulations_path(request.config) / filename) as f:\n data = f.read()\n\n res = session.put(f\"{hoverfly_instance.admin_endpoint}/simulation\", data=data)\n res.raise_for_status()\n\n res = session.put(f\"{hoverfly_instance.admin_endpoint}/hoverfly/mode\", json={\"mode\": \"simulate\"})\n res.raise_for_status()\n\n yield\n\n # see pytest_runtest_makereport\n if request.node.rep_setup.passed and request.node.rep_call.failed:\n resp = session.get(f\"{hoverfly_instance.admin_endpoint}/logs\")\n resp.raise_for_status()\n logs = resp.json()[\"logs\"]\n last_log = logs[-1]\n if \"error\" in last_log:\n print(\"----------------------------\")\n print(\"Hoverfly's log has an error!\")\n print(last_log[\"error\"])\n\n r = session.delete(f\"{hoverfly_instance.admin_endpoint}/simulation\")\n r.raise_for_status()", "def replicate(self,simulation_run):\n\n return self._runModel(params=simulation_run.params)", "def single_simulation(simulation_args):\n window = simulation_args['window']\n cycle_count = simulation_args['cycle_count']\n model_dir = simulation_args['model_dir']\n test_x = simulation_args['test_x']\n test_y = simulation_args['test_y']\n\n # The sensor domain\n domain = 0\n\n # The model's prediction horizon\n horizon = 1\n\n # The model's buffer size\n buffer = window\n\n # Create the device\n device = HapDev(buffer_size=buffer,\n network_delay=meta.network_delay,\n window=window,\n horizon=horizon)\n\n # Create a sensor\n sensor = Sensor(domain=domain,\n buffer_size=buffer,\n dataset=test_x,\n dataset_labels=test_y,\n label_counter=window)\n device.add_sensor(sensor)\n\n # Load the ML model\n custom_objects = {'BL': BL,\n 'TABL': TABL,\n 'MaxNorm': tf.keras.constraints.max_norm}\n try:\n model = load_model(Path('../Zoo/Results/runs/' + model_dir + '/model'), custom_objects=custom_objects)\n except:\n model = load_model(Path(model_dir + '/model'), custom_objects=custom_objects)\n\n device.receive_model(model)\n\n # Lists to hold simulation results\n accuracy_list = []\n predicted_labels = []\n true_labels = []\n run_times = []\n debug_times = []\n\n # Run x steps of simulation\n # for i in tqdm.tqdm(range(cycle_count), desc='Running 
simulation cycles'):\n for i in range(cycle_count):\n if sensor.check_end():\n break\n accuracy, predicted_label, true_label, run_time, debug_results = device.run_one_cycle(domain)\n debug_times.append(debug_results)\n accuracy_list.append(accuracy)\n predicted_labels.append(predicted_label)\n true_labels.append(true_label)\n run_times.append(run_time)\n\n model_name = PurePath(model_dir)\n results_path = '../Results/' + model_name.name\n Path(results_path).mkdir(parents=False, exist_ok=True)\n\n # Save the simulation data\n simulation_results = {'True_labels': true_labels,\n 'Predicted_labels': predicted_labels,\n 'Accuracy': accuracy_list,\n 'Run_times': run_times}\n\n # Save debug data\n debug_df = pd.DataFrame(debug_times)\n debug_df.to_csv(results_path + '/debug_times.csv')\n\n simulation_results_df = pd.DataFrame(simulation_results)\n simulation_results_df.to_csv((results_path + '/Results_cycles-{cycle_count}_sensorID-{sensor}.csv').format(\n cycle_count=cycle_count,\n sensor=domain\n ), index_label='Cycle')\n\n # Plot the results\n plot_simulation_history(predicted_labels, true_labels, accuracy_list, run_times, results_path, domain, cycle_count)", "def performSimulation(self):\n \n if self.parameters['verbose']:\n print(\"=====================\\nStarting simulation with parameters\\n\",self.parameters)\n print(\"=====================\\nInitial Graph\\n\")\n self.showState()\n print(\"=====================\")\n\n while self.parameters['steps'] > 0:\n if self.parameters['verbose']: print(\"Performing step\")\n self.performStep()\n if self.parameters['verbose']: self.showState()\n\n if self.parameters['verbose']:\n print(\"=====================\\nFinished Simulation\\n\\nResult graph:\")\n self.showState()\n #self.showGraph(self.parameters['file_name'])\n #self.showState()\n #self.showStats()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def simulate(self, query):\n return self.master.simulate(query)", "def simulateExperiment(self,experimentSet,results=dict()):\n results[\"PolymerParams\"] = self.get_params()\n results[\"ExperimentParams\"] = experimentSet.get_params()\n experimentSet.run(self, results)", "def runSim(self):\n self.simKillResults = {}\n self.simHitResults = {}\n if self.fromArmy == False:\n self.attackingSquad = copy.deepcopy(squad.Squads[self.attackingSpin.get()])\n for num in range(eval(self.simulationSpin.get())):\n defSquad = copy.deepcopy(squad.DefSquads[self.defendingSpin.get()])\n result = self.attackingSquad.squadFire(defSquad)\n if result[0] not in self.simHitResults:\n self.simHitResults[result[0]] = 0\n self.simHitResults[result[0]] += 1\n if result[1] not in self.simKillResults:\n self.simKillResults[result[1]] = 0\n self.simKillResults[result[1]] += 1\n self.simResultsFrame = Frame(self.__mainWindow, padx=15, pady=15)\n self.simResultsFrame.grid(row=2,column=0,sticky=\"nsew\")\n self.hitResultsFrame = 
Frame(self.simResultsFrame, padx=10, pady=15)\n self.hitResultsFrame.grid(row=0, column=0,sticky=\"nsew\")\n self.killResultsFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.killResultsFrame.grid(row=0, column=1,sticky=\"nsew\")\n self.maxPosFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.maxPosFrame.grid(row=1, sticky=\"nsew\")\n numHitPoss = 0\n numWoundsPoss = 0\n if isinstance(self.attackingSquad, squad.Squad):\n for unit in self.attackingSquad.units:\n numHitPoss += eval(unit.ranged_weapon.attacks)\n else:\n for i in range(self.attackingSquad.current_size):\n for weapon in self.attackingSquad.ranged_weapons:\n numHitPoss += eval(weapon.attacks)\n for unit in squad.DefSquads[self.defendingSpin.get()].units:\n numWoundsPoss += unit.wounds\n rf = 1\n Label(self.hitResultsFrame, text=\"{} hits possible\".format(min(numWoundsPoss,numHitPoss)), font=__item_format__).grid(row=0)\n for hit in self.simHitResults:\n percent = self.simHitResults[hit]/eval(self.simulationSpin.get())*100\n t = \"{} hits: {:6.2f}%\".format(hit, percent)\n Label(self.hitResultsFrame, text=t, font=__item_format__).grid(row=rf)\n rf+=1\n Label(self.killResultsFrame, text=\"{} kills possible\".format(defSquad.current_size), font=__item_format__).grid(row=0)\n for kill in self.simKillResults:\n percent = self.simKillResults[kill]/eval(self.simulationSpin.get())*100\n t = \"{} kills: {:6.2f}%\".format(kill, percent)\n Label(self.killResultsFrame, text=t, font=__item_format__).grid(row=rf)\n rf+=1", "def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._estimate_edens_()\n self._compute_()\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n if self.verbose: print(\"\\n Processing Doppler.\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec", "def run_simulation(self):\n\n # Create agents for simulation\n self.spawn_agents(self.num_agents)\n\n if self.force_personalities != None:\n self.force_personalities(self)\n\n if self.visualizer == True:\n V.Visualizer.createVisualizer(types=self.visualizerOptions, showAtEnd=True)\n\n TM.TimeManager.createManager()\n for x in range (self.time_to_run):\n for agent in self.agents:\n agent.take_turn()\n while self.agents_to_settle:\n self.agents_to_settle.pop().settle_reposts()\n if self.data_collector != None:\n self.data_collector.collector_turn(x, agent)\n if self.visualizer == True:\n self.generate_visualizations(x)\n TM.TimeManager.sharedManager.increaseTime()\n if self.data_collector != None:\n self.data_collector.collector_round(x)\n self.generate_statistics(x)\n\n if self.visualizer == True:\n V.Visualizer.sharedVisualizer.updateEverything()\n\n if self.data_collector != None:\n self.data_collector.finalize()" ]
[ "0.68201935", "0.65608263", "0.64012814", "0.61906785", "0.6135423", "0.60619414", "0.6045807", "0.6036227", "0.6035245", "0.60110086", "0.59889466", "0.59382665", "0.59206253", "0.5891769", "0.5887811", "0.58801687", "0.5842534", "0.5816389", "0.58069575", "0.58062744", "0.5786942", "0.5773373", "0.5750719", "0.57239497", "0.572144", "0.5720262", "0.5715682", "0.57151896", "0.5714322", "0.57060456" ]
0.7099714
0
Saves the response of the current instance, stored in the attribute "resp". It registers the synapse in the register of all synapses, if it has never been saved before. It registers the response in the register of responses of the synapse. It creates a subfolder for the response in the directory of the synapse, to drop the data in .csv files.
def save_response(self): self.indexes['resp'] = attribute_index('resp', self) # Checking if the attribute "resp" is not empty: if not type(self.resp['coords']) == np.ndarray: print("Response is empty. Please run a simulation.") # Checking if the target response has already been registered: elif self.indexes['resp'] == None: # Registering the synapse if necessary: self.indexes['syn'] = register_instance('syn', self) # Registering the response and setting its new index: self.indexes['resp'] = register_instance('resp', self) create_directory('resp', self) # Exporting the contents of the attribute "resp" to csv files: path_dir = path_directory('resp', self) coords_ref = create_coords_ref(self.resp['coords']) pd.DataFrame(self.resp['coords']).to_csv(os.path.join(path_dir, 'coords.csv')) for i in range(len(coords_ref)) : self.resp['glus'][i].to_csv(os.path.join(path_dir, 'resglu{}.csv'.format(coords_ref[i])), header=True) self.resp['AMPAtot'].to_csv(os.path.join(path_dir, 'resAMPAtot.csv'), header=True) self.resp['V'].to_csv(os.path.join(path_dir, 'resV.csv'), header=True) print("Saved: response at index {} for synapse {}.".format(self.indexes['resp'], self.indexes['syn'])) else: print("Response already registered at index {} for synapse {}.".format(self.indexes['resp'], self.indexes['syn']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __save_response(self, method, extras, data):\n\n import os, re\n to = \"/tmp/lex/\"\n if not os.path.exists(to):\n os.mkdir(to)\n\n removeables = re.compile('[/&?:]')\n filename = method + '-' + '_'.join(\"%s=%s\" % kv for kv in extras.iteritems())\n filename = os.path.join(to, removeables.sub('_', filename))\n with open(filename, 'w') as f:\n f.write(data)", "def save_response(self, res) -> None:\n file = open(\"response_{}.json\".format(self.num_res), \"w\")\n file.write(str(res))\n file.close()", "def save_metadata(self, response_dict: dict, dn: DepositorName,\n out_file_prefix: str = 'qualtrics'):\n\n current_stage = self.mc.get_source_stage(dn.folderName)\n\n root_directory = join(\n self.curation_dict[self.curation_dict['parent_dir']],\n current_stage,\n dn.folderName\n )\n metadata_directory = self.curation_dict['folder_metadata']\n\n metadata.save_metadata(response_dict, out_file_prefix,\n metadata_source='QUALTRICS',\n root_directory=root_directory,\n metadata_directory=metadata_directory,\n log=self.log)", "def save_response(self, key, response):\n self.responses[key] = response, datetime.now(timezone.utc)", "def save_response(self, request, response):\n response_dict = self.process_response(request.path, response)\n try:\n self.ser.info(pickle.dumps(response_dict))\n self.ser.info(RESPONSE_UNIQUE_STRING)\n except (TypeError, pickle.PicklingError):\n #Can't pickle wsgi.error objects\n pass", "def save_response(response, file_name, path='~/tmp/fcb-analyzer'):\n \n path = ensure_path(path)\n f = open(path + '/' + file_name, 'w')\n f.write(response.text)", "def write_response_to_lib_folder(self, label: Optional[str], response: Response) -> None:\n cleaned_label = label.replace(\"/\", \"|\") if label else \"response\"\n file_name = cleaned_label + \" \" + str(datetime.now())\n file_ending = \".json\"\n if not os.path.exists(RECORD_PATH):\n os.mkdir(RECORD_PATH)\n proposed_file_name = os.path.join(RECORD_PATH, file_name + file_ending)\n # Cover files with the same name case\n while os.path.exists(proposed_file_name):\n length_of_file_type = len(file_ending)\n proposed_file_name = proposed_file_name[:-length_of_file_type] + \" (1)\" + file_ending\n with open(proposed_file_name, 'w') as f:\n f.write(response.text)\n if 'X-Trace-Id' in response.headers:\n log.info(cleaned_label + ' | X-Trace-Id: ' + response.headers['X-Trace-Id'])", "def update_response(self, response):\n try:\n self.set_workspace()\n with open(self.RESPONSE_FILE, 'wb') as fobj:\n fobj.write(self.encoder.serialize(response)[0])\n path, url = self.publish(self.RESPONSE_FILE)\n except Exception as error:\n self.logger.warning(\"Failed to update the WPS execute response! 
%s\", error)\n raise\n self.logger.debug(\"Response updated.\")\n return path, url", "def store_response(self, new_response):\n self.responses.append(new_response)", "def save_inst(self):\n self.sanity_check()\n self.data_loaded_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n\n pickle.dump(self.pub_auth_all, open(fname_pub_auth_all, 'wb'))\n pickle.dump(self.pub_auth_top, open(fname_pub_auth_top, 'wb'))\n pickle.dump(self.pub_inst_all, open(fname_pub_inst_all, 'wb'))\n pickle.dump(self.pub_inst_top, open(fname_pub_inst_top, 'wb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.history, open(fname_pub_history, 'wb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.staff, open(fname_pub_staff, 'wb'))", "def save(self, dirpath):\n if not self.replications:\n raise ValueError('There are no replications to save')\n\n # TODO: better top-level log\n logger.info(f'Saving {len(self.replications)} replications to {dirpath}')\n os.makedirs(dirpath)\n\n replications_path = os.path.join(dirpath, 'replications')\n os.makedirs(replications_path)\n\n replication_metadata = self.write_index(dirpath)\n self.write_replication_metrics(dirpath, replication_metadata)", "def writeResponse(response):", "def save_file(self, response):\r\n # Extract filename from response url\r\n filename = re.search('[^/]+(?=/$|$)', response.url).group(0)\r\n\r\n # Prepend download folder name to the filename\r\n filename = self.config[\"folder\"] + filename\r\n os.makedirs(os.path.dirname(filename), exist_ok=True)\r\n\r\n # Write contents to file\r\n with open(filename, 'wb') as f:\r\n f.write(response.content)\r\n\r\n # Print message displaying the absolute filepath for convenience\r\n print(\"Downloaded file to \" + os.path.abspath(filename))", "def store_response(self, resource):\n\n \"\"\"Get the content from the POST request.\"\"\"\n content_length = int(self.headers.getheader('Content-Length'))\n body = self.rfile.read(content_length)\n response = json.loads(body)\n\n \"\"\"Add the content to the configured resource queue\"\"\"\n if resource not in self.responses_qeues:\n self.responses_qeues[resource] = []\n self.responses_qeues[resource].append(response)\n else:\n self.responses_qeues[resource].append(response)\n\n \"\"\"Add the content to the dictionary of responses.\"\"\"\n #self.responses_dict.update(response)\n\n \"\"\"Send the response to the request.\"\"\"\n self.send_response(204)\n self.end_headers()", "def create_response_info(self, response):\n output_path = os.path.join(self.output_folder, self.file_name)\n output_path += \".response.txt\"\n with open(output_path, 'w') as file:\n file.write(json.dumps(response))", "async def save_response(self, key: str, response: ClientResponse):\n if not self.is_cacheable(response):\n return\n logger.info(f'Saving response for key: {key}')\n\n expires = self.get_expiration_date(response)\n cached_response = await 
CachedResponse.from_client_response(response, expires)\n await self.responses.write(key, cached_response)\n\n # Alias any redirect requests to the same cache key\n for r in response.history:\n await self.redirects.write(self.create_key(r.method, r.url), key)", "def test_store_single_response(self):\n self.my_survey.store_response(self.responses[0])\n\n self.assertIn(self.responses[0], self.my_survey.response)", "def save(self):\n output = self.prepare_results()\n\n override_name = output[\"config\"][\"sysconfig\"].get(\"output_filename\", None)\n scenario_name = (\n override_name if override_name else output[\"config\"][\"scenario\"][\"name\"]\n )\n filename = f\"{scenario_name}_{output['timestamp']}.json\"\n log.info(\n \"Saving evaluation results to path \"\n f\"{self.scenario_output_dir}/{filename} \"\n \"inside container.\"\n )\n output_path = os.path.join(self.scenario_output_dir, filename)\n with open(output_path, \"w\") as f:\n json_utils.dump(output, f)\n if os.path.getsize(output_path) > 2**27:\n log.warning(\n \"Results json file exceeds 128 MB! \"\n \"Recommend checking what is being recorded!\"\n )", "def _save_metadata(self, result_dir: Path):\n id_path = result_dir / SerializationAttributes.ID_FILENAME\n with open(id_path, 'w') as f:\n json.dump({SerializationAttributes.ID_KEY: self.id}, f)\n\n version_path = result_dir / SerializationAttributes.VERSION_FILENAME\n with open(version_path, 'w') as f:\n json.dump({SerializationAttributes.VERSION_KEY: self.version}, f)", "def store_response(resp, response_dict):\n if response_dict is not None:\n response_dict['status'] = resp.status\n response_dict['reason'] = resp.reason\n response_dict['headers'] = resp_header_dict(resp)", "def setup_save_point(self):\n\n # figure out the rel path we should save down\n n = datetime.datetime.now()\n r_path = os.sep.join([n.year,n.month,n.day, self.stream_id,\n n.hour,n.minute])\n\n # get our full path\n save_root = self.server.config.get('stream_save_root')\n out_path = os.path.join(save_root,r_path)\n\n\n # keep it around\n self.save_path = out_path", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def test_store_single_response(self):\n self.my_survey.store_response(self.responses[0])\n self.assertIn('English', self.my_survey.responses)", "def update_response(self, response):\r\n self.stri.update_response(response)", "def update_response(self, response):\r\n self.stri.update_response(response)", "def responses(resp_def):\n def decorator(fn): # pylint: disable=missing-docstring\n meta = RouteMeta.load(fn)\n meta.responses = resp_def\n meta.save()\n return fn\n return decorator", "def retrieve_response(self, respindex):\n try :\n # Unpacking the data in the .resp attribute:\n self.indexes['resp'] = respindex\n path_dir = path_directory('resp', self)\n self.resp['coords'] = pd.read_csv(os.path.join(path_dir, 'coords.csv'), index_col=0).to_numpy()\n coords_ref = create_coords_ref(self.resp['coords'])\n self.resp['glus'] = 
tuple(pd.read_csv(os.path.join(path_dir, 'resglu{}.csv'.format(ref)), index_col=0) for ref in coords_ref)\n self.resp['AMPAtot'] = pd.read_csv(os.path.join(path_dir, 'resAMPAtot.csv'), index_col=0)\n self.resp['V'] = pd.read_csv(os.path.join(path_dir, 'resV.csv'), index_col=0)\n print(\"Response retrieved.\")\n except FileNotFoundError:\n print(\"No response saved under this index.\")", "def _update(self):\n print(\"Saving prediction json files...\")\n self._dump_json()\n print(\"Saving prediction json files done...\")\n print(\"Saving prediction images...\")\n self._dump_image()\n print(\"Saving prediction images done...\")", "def _save_file(json_response, path, filename):\n if path is not None:\n if path[-1] != \"/\":\n path = path+\"/\"\n filepath = os.path.join(path, filename)\n if not os.path.exists(path):\n os.makedirs(path)\n\n with open(filepath+'.json', 'w') as output_file:\n output_file.write(json_response.text)", "def save_response(experiment, response):\n print(\"saving response in experiment\", experiment)\n participant = MturkParticipantColBG.query.filter(\n MturkParticipantColBG.id == experiment[\"participant_id\"]\n ).one()\n colour_response = MturkColourResponseColBG(\n participant=participant,\n target_id=response[\"target_id\"],\n name=response[\"name\"],\n response_time=response[\"response_time\"],\n background_id=experiment[\"background_id\"],\n )\n print(colour_response)\n db.session.add(colour_response)\n db.session.commit()\n return MturkColourResponseColBG.query.filter(\n MturkColourResponseColBG.participant == participant\n ).count()" ]
[ "0.6496459", "0.63103664", "0.6086072", "0.58819896", "0.5703142", "0.56734663", "0.5585821", "0.5573588", "0.545367", "0.54131037", "0.54019815", "0.53985244", "0.5360619", "0.5357478", "0.53520054", "0.53313655", "0.5265266", "0.5248387", "0.5233007", "0.5187448", "0.51630014", "0.51533073", "0.51459914", "0.5137858", "0.5137858", "0.5110168", "0.5108159", "0.5101494", "0.5096044", "0.50856227" ]
0.79284173
0
Displays the matrices representing the synapse.
def visualize_synapse(self, print_coords=False): S = self.S.copy() I = self.I.copy() if print_coords : print_matrix(S, I, self.res, print_coords=True, coords=self.resp['coords']) else : print_matrix(S, I, self.res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self):\n\t\tprint(\"Square Matrix:\")\n\t\tfor i in range(0, len(self.lables)):\n\t\t\tprint(self.matrix[i])", "def print_matrices(self):\n\n \"\"\"\n Print Optimal Matrix\n \"\"\"\n print(\"\\n\", \"_\"*7, \"Optimal Matrix\", \"_\"*7)\n print(\"\\t\\t\" + \"\\t\".join(list(self.sequenceB)))\n for i in range(0, len(self.sequenceA)+1):\n\n if i >= 1:\n print(self.sequenceA[i-1] + '\\t', end=\"\")\n else:\n print('\\t', end=\"\")\n for j in range(0, len(self.sequenceB)+1):\n print(str(self.optimal[i][j]) + '\\t', end=\"\"),\n print(\"\")\n\n \"\"\"\n Print Direction Matrix\n \"\"\"\n print(\"\\n\", \"_\"*7, \"Direction Matrix\", \"_\"*7)\n print(\"\\t\\t\" + \"\\t\".join(list(self.sequenceB)))\n for i in range(0, len(self.sequenceA)+1):\n if i >= 1:\n print(self.sequenceA[i-1] + '\\t', end=\"\"),\n else:\n print('\\t', end=\"\"),\n for j in range(0, len(self.sequenceB)+1):\n print(str(self.direction[i][j]) + '\\t', end=\"\"),\n print(\"\")", "def showMatrix(self, frame, matrix, label=''): \n M = self.matrix2Table(matrix)\n mtable = self.showTable(frame, M, label)\n return mtable", "def show_transform_matrices(self):\n\n print(f'Transform Matrices are: {self.tf_matrices_list}')", "def _print_matrix(self):\n print(self.matrix)", "def display(self, message=\"\"):\n print(\"-\" * (79 - len(message)), end=\" \")\n print(message)\n if self.mat is None:\n print(\"None\")\n else:\n print(self.__repr__())\n print(\"=\" * 80)", "def display(self):\n count = 0\n self.displays[0].start() # call only once to support shift chain\n for d in self.displays:\n d.output(self.data[count])\n count += 1\n self.displays[0].latch() # call only once to support shift chain", "def displayAsMatrix(lists):\r\n for lst in lists:\r\n print(lst)", "def visualize(self):\n # TODO\n #pyLDAvis.enable_notebook()\n #vis = pyLDAvis.gensim.prepare(self.lda_model, self.stemmed_corpus)\n return", "def show(self):\n tmpstrip = self.matrixtostrip(self.matrix)\n for i in range(len(tmpstrip)):\n self.strip[i] = tmpstrip[i]\n self.strip.show()", "def display_board(self):\n print('*' + '*'.join(['**']*len(self.board[0])) + '*')\n for row in self.board:\n print('|' + ' '.join([('%s' % square) for square in row]) + '|')\n print('*' + '*'.join(['**']*len(self.board[0])) + '*')", "def PrintMatrix(self):\n # loop through the rows\n for i in range(self.rows):\n # intialise the matrix\n mat = []\n # loop through the column\n for j in range(self.cols):\n # append matrix element\n mat.append(self.matrix[i][j])\n # print the matrix\n print(mat)", "def display(self):\n for row in self.tile_rows:\n print(row)", "def visualize(self):\n\n self.check_model()\n show(prepare(self.model, self.vectorized_data, self.vectorizer, mds='tsne'))", "def display(self):\n for row in self._board_area:\n print(row, end=\"\\n\")", "def display(self):\n self.display_divider()\n self.display_row(self.column_names)\n self.display_divider()\n for row in self.rows:\n self.display_row(row)\n self.display_divider()", "def show_matrix(matrix,kind=\"temperature\"):\n if kind==\"temperature\":\n cmap = \"bwr\"\n plt.title(\"Temperature\")\n elif kind==\"habitat\":\n cmap = \"Greens\"\n plt.title(\"Habitat\")\n else:\n cmap = \"Blues\"\n plt.imshow(matrix,\n interpolation='None',\n cmap=cmap,\n vmin=0,\n vmax=1,\n aspect=\"equal\",)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n\n plt.xticks([])\n plt.yticks([])\n plt.colorbar(orientation=\"horizontal\", fraction=0.045)", "def setDisplay(self):\n 
self.graph_display=[self.complexCompose(self.coefficients,(t+1)/self.display_number)[-1] for t in range(self.display_number)]", "def show_board(self):\n for i in range(self.num_rows):\n print(' ----'*8)\n s = \"\"\n for j in range(self.num_cols):\n s += '| {} '.format(self._show_piece(i, j))\n print(\"{}|\".format(s))\n print(' ----'*8)", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def display(self):\n logging.info(\"Display Carte : {}\".format(self.name))\n for row in self.map:\n #print(row)\n for cell in row:\n print(cell, end = \"\")\n print(\"\")", "def display(self):\n for i in range(0, len(self.top_row)):\n self.top_row[i].display()\n for i in range(0, len(self.bottom_row)):\n self.bottom_row[i].display()\n for i in range(0, len(self.left_col)):\n self.left_col[i].display()\n for i in range(0, len(self.right_col)):\n self.right_col[i].display()", "def display_board(self):\n\n for i in range(len(self._board[0])):\n row = ''\n for j in range(len(self._board)):\n if self._board[j][i] == '':\n row += ' - '\n else:\n row += ' '+str(self._board[j][i])+' '\n print(row)\n print('............................................')", "def analyze_show():\n def mat_to_title(mat_file):\n mat_split = mat_file.split('_')\n while (mat_split.pop() not in ANALYSIS_METHODS):\n pass\n return string.join(mat_split,'_') + '*.mat'\n\n plotables = []\n for mat_file in Args.plotable_files:\n plotables.extend(\n [\n ((val.squeeze(),key), \"{0}: {1}\".format(mat_to_title(mat_file),key))\n for key,val in scipy.io.loadmat(mat_file).viewitems()\n if not (key.startswith('__') and key.endswith('__'))\n ]\n )\n ana_plot_graphs(*zip(*plotables),show=True)", "def show2(self):\n #zfactor = 4\n xb, yb = self.bary.T\n sol0 = self.dat[0]['sol'][:,0]\n triangles = self.tri_pnts_b\n import mayavi.mlab as mlab\n fig = mlab.figure(bgcolor = (0.1, 0.1, 0.1),\n size = (1280, 800))\n @mlab.animate()\n def showdat():\n \"\"\"Example from:\n http://github.enthought.com/mayavi/mayavi/tips.html#animating-a-visualization\n \"\"\"\n # triangular_mesh see:\n # http://github.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html?highlight=triangular_mesh#mayavi.mlab.triangular_mesh\n img = mlab.triangular_mesh(xb, yb, sol0, triangles, scalars=sol0)\n #fig = mlab.gcf()\n ms = img.mlab_source\n for t, s in self.dat:\n # see: http://github.enthought.com/mayavi/mayavi/mlab_animating.html?highlight=animating\n ms.set(scalars=s[:,0])\n yield\n a = showdat()", "def displayScene(self):\n sceneprint = \"\"\n sceneprint += \" \"*40 + Back.LIGHTRED_EX + Fore.LIGHTCYAN_EX + Style.BRIGHT + \"M A N D A L O R I A N\\n\" + RESET\n sceneprint += Fore.LIGHTBLUE_EX +\"SCORE : \" +\\\n str(self.__score) + \" \"*30 +\"TIME : \" + str(self.__remaining_time) + \" \"*30 +\\\n \"LIVES:\" + str(self.__lives)+\"\\n\"+ RESET\n if self.__start >= self.__fullwidth - self.__width:\n self.__start = self.__fullwidth - self.__width\n for i in range(0, self.__height):\n for j in range(self.__start, self.__start + self.__width):\n sceneprint += str(self.__matrix[i][j])\n sceneprint += '\\n'\n \n if self.__start + sc_span < sc_full - 5:\n self.__start = self.__start + 1\n if self.__score < 420420420:\n self.__score += 1\n pass\n\n return sceneprint", "def show():\n\tplt.show()", "def display(self, index):\n img = self.img(index)\n transcription = 
self.transcript(index)\n plt.imshow(self.norm_img(img), cmap='bone')\n plt.title(transcription, fontdict={'fontsize': 64})\n plt.show()", "def show_boards(self):\n\n self.board.render_board()\n print(\"--------------------------------\")\n self.board_to_shots.render_board()", "def show(self):\n\n print(\"\\n---------------------------------------------------------\")\n\n print(\"\\n{0}\".format(self.name))\n print(\"\\n\\tMonitoring the following Mechanism OutputPorts:\")\n if self.objective_mechanism is None:\n print(\"\\t\\tNone\")\n else:\n for port in self.objective_mechanism.input_ports:\n for projection in port.path_afferents:\n monitored_port = projection.sender\n monitored_port_Mech = projection.sender.owner\n monitored_port_index = self.monitored_output_ports.index(monitored_port)\n\n weight = self.monitored_output_ports_weights_and_exponents[monitored_port_index][0]\n exponent = self.monitored_output_ports_weights_and_exponents[monitored_port_index][1]\n\n print(\"\\t\\t{0}: {1} (exp: {2}; wt: {3})\".\n format(monitored_port_Mech.name, monitored_port.name, weight, exponent))\n\n print(\"\\n\\tModulating the following parameters:\".format(self.name))\n # Sort for consistency of output:\n port_Names_sorted = sorted(self.output_ports.names)\n for port_Name in port_Names_sorted:\n for projection in self.output_ports[port_Name].efferents:\n print(\"\\t\\t{0}: {1}\".format(projection.receiver.owner.name, projection.receiver.name))\n\n print(\"\\n---------------------------------------------------------\")" ]
[ "0.7430056", "0.65280557", "0.6505762", "0.63814694", "0.6345823", "0.6343931", "0.63282037", "0.62735105", "0.6241176", "0.62010705", "0.61724424", "0.6158194", "0.61249804", "0.6123386", "0.60994476", "0.60917485", "0.60899925", "0.60591555", "0.60288167", "0.60245425", "0.6021247", "0.60048014", "0.5983952", "0.5978475", "0.5969093", "0.59610486", "0.59588706", "0.5897476", "0.58750427", "0.58662665" ]
0.6918936
1
Make a proper FQDN from name
def _ensure_fqdn(self, name):
    if name[-1:] != ".":
        return "%s." % name
    else:
        return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def create_domain_name(self, name):\n return (\"%s.%s.%s\" % (name, \"net\", self.domain)).lower()", "def create_internal_dns_name ( base_name, name ) :\n name = name + '.internal'\n return create_dns_name( base_name, name )", "def normalize_fqdn(fqdn):\n if not fqdn:\n return None\n\n if fqdn.endswith('/'):\n fqdn = fqdn.strip('/')\n\n # bare fqdn, fallback to http://\n if not fqdn.startswith('http'):\n fqdn = \"http://%s\" % fqdn\n return fqdn", "def create_internal_elb_dns_name ( base_name, name ) :\n return 'lb.' + create_dns_name( base_name, name )", "def slicename_to_hostname(vs_name):\n fields = vs_name.split('_')\n if len(fields) == 1:\n prefix = vs_name\n else:\n # The vs_name prefix is the PlanetLab site name.\n # The rest is user-chosen. Place the site name after user-chosen name.\n prefix = '.'.join(fields[1:] + [fields[0]])\n return '%s.%s' % (prefix, _root_hostname)", "def _getHostname(fqdn):\n\treturn fqdn.split('.')[0]", "def get_fqdn():\n return socket.getfqdn()", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', new_name)\n return new_name", "def nodename(name, hostname):\n return NODENAME_SEP.join((name, hostname))", "def localizeForHostName(filename): \n hostname = socket.gethostname()\n if hostname in filename:\n updated_filename = filename.replace(hostname, '')\n return updated_filename.strip('-')\n return filename", "def generate_domainname():\n domainname = ''.join(generate_string(10, valid_domain_name_chars))\n domain = random.choice(['com', 'co.il', 'info'])\n return domainname+'.'+domain", "def format_hostname(hostname: str) -> str:\n if has_ipv6 and re.match(r\"\\d+.\\d+.\\d+.\\d+\", hostname) is not None:\n hostname = f\"::ffff:{hostname}\"\n return hostname", "def get_fqdn(ip_address):\n return socket.gethostbyaddr(ip_address)", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' 
+ env", "def hostname(name: str = \"\") -> str:\n ...", "def _username_from_name(self, name):\r\n return name.replace(' ', '_')", "def _FormalizeName(cls, name):\n name = name.replace(\"_\", \"-\").lower()\n name = name[:cls.NAME_LENGTH_LIMIT]\n if name[-1] == \"-\":\n name = name[:-1] + cls.REPLACER\n return name", "def fqdn(self):\n raise NotImplementedError", "def fqdn_f(x: Text) -> Tuple[Text, Text]:\n return \"fqdn\", x.lower()", "def genHostname(ipAddr):\n\tdomain = '.osdev.skrill.net.'\n\tif ipAddr:\n\t\treturn 'vm-' + '-'.join(ipAddr.split('.')) + domain\n\telse:\n\t\treturn ''", "def upstream_name(uri):\r\n\treturn uri.strip(\"/\").replace(\"/\", \"-\")", "def upstream_name(uri):\n\treturn uri.strip(\"/\").replace(\"/\", \"-\")", "def forest_dns_name(self):\n forest_dn = self.get_root_basedn()\n return forest_dn.canonical_str().split('/')[0]", "def to_safe_name(name: str) -> str:\n return regex_replace(r'\\-|\\.|:', \"\", name.replace(' ', '_'))", "def makeFilename ( name ):\n # Spaces, parens and slashes are useful to have as underscores\n fn = name\n fn = re.sub ( \"[ /()]\", \"_\", fn )\n # Anything else gets removed\n fn = re.sub ( \"[^0-9a-zA-Z._-]\", \"\", fn )\n # Replace __ with _\n fn = re.sub ( \"_+\", \"_\", fn )\n return fn", "def fqdn(self):\n if not self._fqdn:\n self._fqdn = socket.getfqdn()\n return self._fqdn", "def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")", "def gethostbycondorname(name):\n\n m = htcondor_ip_name_re.match(name)\n if m is not None:\n return m.group(1).replace('-', '.')\n else:\n return socket.gethostbyname(name)", "def _convert_name(self, name):\n if not self.re_name.match(name):\n org_name = name\n name = self.re_white.sub('_', name)\n name = self.re_alpha.sub('_', name)\n if not self.re_name.match(name):\n name = 'x_' + name2\n self.warn('Converting name <' + org_name + '> to <' + name + '>.')\n return name" ]
[ "0.75482833", "0.71567446", "0.7055514", "0.6987774", "0.6612602", "0.65653723", "0.65199786", "0.6346808", "0.6328137", "0.62123156", "0.61994225", "0.61923176", "0.6191347", "0.6179072", "0.61620694", "0.61555946", "0.6129054", "0.61018264", "0.6083514", "0.6032847", "0.6030342", "0.602433", "0.6019246", "0.6007403", "0.5959002", "0.59042615", "0.5879483", "0.58693", "0.5861082", "0.58597296" ]
0.79934853
0
Return a shuffled array with samples and labels
def shuffled_copies(samples, labels):
    # Check if the samples and labels are from the same format
    assert len(samples) == len(labels)
    permu = np.random.permutation(len(samples))
    # Get the correct indexes
    samples = samples[permu]
    labels = labels[permu]

    return samples, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx], labels[idx]", "def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx], idx", "def shuffle_data(self):\n images = list(self.train_images)\n labels = list(self.train_labels)\n self.train_images = []\n self.train_labels = []\n\n # create list of permutated index and shuffle data accoding to list\n idx = np.random.permutation(len(labels))\n for i in idx:\n self.train_images.append(images[i])\n self.train_labels.append(labels[i])", "def shuffle_data(data, labels):\r\n idx = np.arange(len(labels))\r\n np.random.shuffle(idx)\r\n return data[idx, ...], labels[idx], idx", "def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx,...], idx", "def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx], idx", "def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx], idx", "def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx], idx", "def shuffle_labels(self):\n random.shuffle(self.y_train)\n random.shuffle(self.y_test)", "def inlabel_shuffle(data):\n num_zero_data = np.sum(data[:,-1]==0)\n label_zero_data = data[:num_zero_data,:]\n label_one_data = data[num_zero_data:,:]\n np.random.shuffle(label_zero_data)\n np.random.shuffle(label_one_data)\n return data", "def shuffle_dataset(self, seed=None):\n stacked = np.r_[self.X_train,\n self.y_train]\n shuffle(stacked.T)\n X_shuffled = stacked[:self.nfeatures,:]\n y_shuffled = stacked[self.nfeatures:,:]\n return X_shuffled, y_shuffled", "def shuffle_dataset(data, label, others=None, class_balanced=False):\n if class_balanced:\n sorted_ids = []\n\n for i in range(label.max() + 1):\n tmp_ids = np.where(label == i)[0]\n np.random.shuffle(tmp_ids)\n sorted_ids.append(tmp_ids)\n\n sorted_ids = np.stack(sorted_ids, 0)\n sorted_ids = np.transpose(sorted_ids, axes=[1, 0])\n ids = np.reshape(sorted_ids, (-1,))\n\n else:\n ids = np.arange(data.shape[0])\n np.random.shuffle(ids)\n\n if others is None:\n return data[ids], label[ids]\n else:\n return data[ids], label[ids], others[ids]", "def shuffle_dataset(instances, labels, seed):\n data = list(zip(instances, labels))\n\n if isinstance(seed, int):\n random.Random(seed).shuffle(data)\n else:\n random.Random().shuffle(data)\n\n instances, labels = zip(*data)\n\n return instances, labels", "def random_preprocessing(inputs, labels):\r\n indices = range(0, labels.shape[0])\r\n shuffled_indices = tf.random.shuffle(indices)\r\n inputs = tf.gather(inputs, shuffled_indices)\r\n labels = tf.gather(labels, shuffled_indices)\r\n return inputs, labels", "def get_samples(n_samples, data, labels=None, use_random_transpose=False):\n indices = np.random.choice(len(data), n_samples, False)\n if np.issubdtype(data.dtype, np.bool_):\n sample_data = data[indices] * 2.0 - 1.0\n else:\n sample_data = data[indices]\n if use_random_transpose:\n sample_data = np.array([random_transpose(x) for x in sample_data])\n if labels is None:\n return sample_data\n return sample_data, labels[indices]", "def shuffle_T(self):\n np.random.shuffle(self.T)", "def next_batch(num, data, labels):\n idx = np.arange(0 , len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n 
labels_shuffle = [labels[i] for i in idx]\n\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)", "def shuffleData(self, x, y):\n #get new random order for inputs and targets\n order = np.arange( x.shape[0] )\n random.shuffle( order )\n #reorder inputs and targets\n return x[order], y[order]", "def shuffle_train_data(X_train, Y_train):\n perm = np.random.permutation(len(Y_train))\n Xtr_shuf = X_train[perm]\n Ytr_shuf = Y_train[perm]\n\n return Xtr_shuf, Ytr_shuf", "def randomize(data):\r\n permutation = np.random.permutation(data.shape[0])\r\n shuffled_data = data[permutation, :]\r\n # shuffled_y = y[permutation]\r\n return shuffled_data", "def build_data(samples, labels):\n num_samples = len(samples)\n indexes = list(range(num_samples))\n np.random.shuffle(indexes)\n num_train = int(train_ratio * num_samples)\n # Get the indexes of train data and test data.\n train_indexes = indexes[0:num_train]\n test_indexes = indexes[num_train:num_samples]\n\n # Build the train data and test data.\n train_data = samples[train_indexes]\n train_labels = labels[train_indexes]\n test_data = samples[test_indexes]\n test_labels = labels[test_indexes]\n\n return train_data, test_data, \\\n train_labels, test_labels, \\\n train_indexes, test_indexes", "def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])", "def load_data_train(self, shuffle=True):\n\n data, label = self._generate_all_combinations_of_stripe_images(shuffle=shuffle);\n\n return data, label;", "def next_batch(num, data, labels):\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n \n return np.asarray(data_shuffle), np.asarray(labels_shuffle)", "def shuffle_data(data):\n idx = np.arange(len(data))\n np.random.shuffle(idx)\n return data[idx, ...]", "def shuffle_data(data):\n indices = list(range(data.shape[0]))\n np.random.shuffle(indices)\n return data[indices]", "def sample_train_batch(self):\r\n batch = []\r\n labels =[]\r\n num_groups = self.batch_size // self.batch_k\r\n sampleed_classes = np.random.choice(self.train_class_ids,num_groups,replace=False)\r\n for class_id in sampleed_classes:\r\n img_fname = np.random.choice(self.train_image_files[class_id],self.batch_k,replace=False)\r\n batch += img_fname.tolist()\r\n labels += [class_id]*self.batch_k\r\n return batch,labels", "def shuffle_data(X, y, seed=None):\n if seed:\n np.random.seed(seed)\n idx = np.arange(X.shape[0])\n np.random.shuffle(idx)\n return X[idx], y[idx]", "def shuffle_data(X, y, seed=None):\n if seed:\n np.random.seed(seed)\n idx = np.arange(X.shape[0])\n np.random.shuffle(idx)\n return X[idx], y[idx]", "def shuffle_data(X, y, seed=None):\n if seed:\n np.random.seed(seed)\n idx = np.arange(X.shape[0])\n np.random.shuffle(idx)\n return X[idx], y[idx]" ]
[ "0.76070595", "0.7447813", "0.7435367", "0.7387749", "0.7372803", "0.73596", "0.73596", "0.73596", "0.73275393", "0.71254265", "0.69440496", "0.67869115", "0.66839826", "0.66317755", "0.6595794", "0.6549036", "0.6538608", "0.65171605", "0.651533", "0.64785296", "0.64736927", "0.645667", "0.64490914", "0.64340854", "0.6392989", "0.63648665", "0.6342539", "0.6328009", "0.6328009", "0.6328009" ]
0.7508061
1
Return plot of the accuracy during the training of the neural network
def accuracy_plot(training, test, layers, data_size, n_neighbours, learning_rate, dropout_rate):
    plt.figure()
    plt.plot(training, label="Training")
    plt.plot(test, label="Test")
    plt.xlabel("Iterations", size='medium')
    plt.ylabel("Accuracy function (%)", size='medium')
    plt.suptitle("Accuracy function while training the neural network", size='medium', ha='center')
    plt.title("layers: {} with dropout rate of {}, learning rate: {}".format(layers, dropout_rate, learning_rate),
              size='small', ha='center')
    if n_neighbours == 0:
        plt.figtext(0.83, 0.80, "Neighbours\nexcluded", size='medium')
    else:
        plt.figtext(0.83, 0.80, "{} neighbours\nincluded".format(n_neighbours), size='medium')
    plt.figtext(0.83, 0.70, "{}\nsamples".format(data_size), size='medium')
    plt.legend(loc='right', bbox_to_anchor=(1.3, 0.5))
    plt.subplots_adjust(right=0.8)
    working_dir = os.path.dirname(os.path.abspath(__file__))
    saving(working_dir + "/output_ANN/accuracy_plots/{}_accuracy_{}".format(n_neighbours, data_size))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_accuracy(self):\n plot_title, img_title = self.prep_titles(\"\")\n test_legend = ['training data', 'test data']\n\n # Data for plotting x- and y-axis\n x = np.arange(1, CFG.EPOCHS + 1)\n y = [self.tr_accuracy, self.test_accuracy]\n\n # prints x and y-axis values\n print(f'x: {x}')\n print(f'training: {self.tr_accuracy}')\n print(f'test: {self.test_accuracy}')\n\n plt.figure(figsize=(CFG.FIG_WIDTH, CFG.FIG_HEIGHT))\n\n # Create the lineplot\n for line in range(2):\n ax = sns.lineplot(x=x, y=y[line], color=CFG.COLOR_ACCURACY[line], label=test_legend[line])\n\n if CFG.ANNOTATE:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS + 2),\n ylim=(0, 119))\n\n for line in range(2):\n for e in range(0, CFG.EPOCHS):\n if y[line][e] > CFG.ANNOTATE_LEVEL:\n value = \"{:.2f}\".format(y[line][e])\n label = \"epoch \" + str(e + 1) + \"\\n\" + value + \"%\"\n plt.annotate(label,\n xy=(x[e], y[line][e]),\n alpha=1,\n size=9,\n rotation=45,\n textcoords='offset pixels', xytext=(0, 7),\n ha='left', va='bottom')\n else:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS),\n ylim=(0, 102))\n\n ax.legend(loc='best')\n\n self.save_plot(img_title)\n plt.show()", "def plot_accuracy(model_fit, save_folder): \n train_acc = model_fit.history['binary_accuracy']\n val_acc = model_fit.history['val_binary_accuracy']\n epoch_axis = np.arange(1, len(train_acc) + 1)\n plt.title('Train vs Validation Accuracy')\n plt.plot(epoch_axis, train_acc, 'b', label='Train Acc')\n plt.plot(epoch_axis, val_acc,'r', label='Val Acc')\n plt.xlim([1, len(train_acc)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_acc) / 10) + 0.5)))\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/accuracy.png')\n plt.show()\n plt.close()", "def model_testing(X_train,y_train):\n\n # for testing amount of layers, each layer has 32 neurons\n # layers = [[32, 32], [32, 32, 32], [32, 32, 32, 32], [32, 32, 32, 32],\\\n # [32, 32, 32, 32, 32], [32, 32, 32, 32, 32, 32]]\n layers = [[8], [16], [32], [64], [128], [256]]\n\n # activation = [\"linear\", \"sigmoid\", \"relu\", \"softmax\"]\n activation = [\"relu\"]\n runs = 1\n for i, act in enumerate(activation):\n val_accs = []\n for layer in layers:\n acc_avg = []\n for run in range(runs):\n model = create_model_testing(layer, act)\n\n # train model on full train set, with 80/20 CV split\n training = model.fit(X_train, y_train, epochs=100, validation_split=0.2, verbose=0)\n val_acc = np.mean(training.history['val_accuracy'])\n print(\"Run \", run, \" - \", act + \" activation - layer \" + str(layer))\n acc_avg.append(val_acc)\n\n # save average accuracy of runs\n val_accs.append(round(np.mean(acc_avg)*100, 2))\n print(\"accuracy: \" + str(np.mean(acc_avg)))\n\n # plot line for each activation method\n plt.plot([1,2,4,8,16,32,64,128,256], val_accs, label=act)\n # plt.plot(val_accs, label=act)\n\n # plotting\n plt.title(\"Accuracy of neural network model with different layers (N=\" +\\\n str(len(layers)) + \")\", fontsize=22)\n plt.xlabel(\"Layers\", fontsize=20)\n # plt.xticks(np.arange(1, len(val_accs) + 1, 1), fontsize=18)\n plt.ylabel(\"Accuracy (%)\", fontsize=20)\n plt.legend()\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/linear-relu-\" + str(runs) + \"runs.png\")\n plt.show()", "def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], 
label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)", "def train_visualization(output_path): \n log_path = output_path + 'output.log'\n Train_Cost, Valid_Cost, Test_Cost, Train_Acc, Valid_Acc, Test_Acc = log_reader(log_path)\n n_epoch = len(Train_Cost)\n\n x1 = range(n_epoch)\n x2 = range(n_epoch)\n y1 = Train_Cost\n y2 = Valid_Cost\n y3 = Test_Cost\n y4 = Train_Acc\n y5 = Valid_Acc\n y6 = Test_Acc\n plt.subplot(2, 1, 1)\n plt.plot(x1, y1, label=\"Train_Cost\", linewidth=2)\n plt.plot(x1, y2, label=\"Valid_Cost\", linewidth=2)\n plt.plot(x1, y3, label=\"Test_Cost\", linewidth=2)\n\n plt.title('binary cross entropy vs. epoches')\n plt.ylabel('binary cross entropy')\n plt.legend(loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(x2, y4, label=\"Train_Acc\", linewidth=2)\n plt.plot(x2, y5, label=\"Valid_Acc\", linewidth=2)\n plt.plot(x2, y6, label=\"Test_Acc\", linewidth=2)\n plt.xlabel('Accuracy@20 vs. epoches')\n plt.ylabel('Accuracy@20')\n plt.legend(loc='best')\n plt.savefig(output_path + 'loss_fig.png')\n # plt.show()", "def make_accuracy_plot(num_trials=10):\n data = load_digits()\n # print data.DESCR\n train_percentages = range(5, 95, 5)\n test_accuracies = numpy.zeros(len(train_percentages))\n\n for i in range(len(train_percentages)):\n individual_trial_accuracies = []\n for j in range(num_trials):\n X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size=train_percentages[i]*.01)\n model = LogisticRegression(C=10**-10)\n model.fit(X_train, y_train)\n individual_trial_accuracies.append(model.score(X_test, y_test))\n test_accuracies[i] = numpy.mean(individual_trial_accuracies)\n\n fig = plt.figure()\n plt.plot(train_percentages, test_accuracies, 'b')\n plt.xlabel('Percentage of Data Used for Training')\n plt.ylabel('Accuracy on Test Set')\n plt.show()", "def plot_acc(model_dir):\n ## extract loss from csv\n file_dir = os.path.join(model_dir, 'acc.csv')\n data = pd.read_csv(file_dir)\n epochs = data['epoch'].ravel()\n acc_train = data['acc_train'].ravel()\n acc_test = data['acc_test'].ravel()\n # epoch,acc_train,acc_test\n\n ## Theoretical Loss\n fig, ax = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True, dpi=400)\n ax.plot(epochs, acc_train, label='train', color='green', alpha=0.8)\n ax.plot(epochs, acc_test, label='test', color='red', alpha=0.8)\n ax.set_ylabel('Accuracy', fontsize=10)\n ax.set_xlabel('Epoch', fontsize=10)\n ax.legend(loc='lower right', prop={\"size\": 15}, ncol=3, framealpha=0.5)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.tight_layout()\n\n ## create saving directory\n acc_dir = os.path.join(model_dir, 'figures', 'acc')\n os.makedirs(acc_dir, exist_ok=True)\n file_name = os.path.join(acc_dir, 'accuracy.png')\n plt.savefig(file_name, dpi=400)\n print(\"Plot saved to: {}\".format(file_name))\n file_name = os.path.join(acc_dir, 'accuracy.pdf')\n plt.savefig(file_name, dpi=400)\n plt.close()\n print(\"Plot saved to: {}\".format(file_name))", "def plot_acc(acc_v, acc_t, save_plots_path):\n\n plt.figure()\n plt.plot(acc_v, label='Validation acc')\n plt.plot(acc_t, label='Training acc')\n plt.legend()\n title = 'Accuracy per epoch'\n plt.title(title)\n 
plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.savefig(save_plots_path + \"swag_accuracy_plot.png\")", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def train_nn(train_nn_results, label, title, yaxis):\n plt.figure(figsize=(12,5))\n for i in range(len(label)):\n plt.plot(train_nn_results[i], label=label[i], alpha=0.75)\n plt.title(title)\n plt.xlabel('epoch')\n plt.ylabel(yaxis)\n plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\n plt.tight_layout()\n plt.show()", "def display_convergence_acc(train_accs, valid_accs):\n if len(valid_accs) > 0:\n plt.plot(len(train_accs), train_accs, color=\"red\")\n plt.plot(len(valid_accs), valid_accs, color=\"blue\")\n plt.legend([\"Train\", \"Valid\"])\n else:\n plt.plot(len(train_accs), train_accs, color=\"red\")\n plt.legend([\"Train\"])\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.show()", "def test(self, plot=False):\n accuracy_list = []\n fobj_avg = self.load_stats()\n\n for ii in range(settings.PARS['maxIters']):\n model = self.load_model(ii)\n D1 = model['D']\n W1 = model['W']\n\n # classification\n tic = time.time()\n accuracy_list.append(self.classification(D1, W1)[1])\n toc = time.time()\n print(\n 'Final recognition rate for OnlineDL is : {} , objective function value: {}, time: {}'\n .format(accuracy_list[ii], fobj_avg[ii], toc-tic)\n )\n\n accuracy_list = np.asarray(accuracy_list)\n\n print('Best recognition rate for OnlineDL is {} at iteration {}'.format(\n accuracy_list.max(), accuracy_list.argmax()))\n\n if plot:\n # plot the objective function values for all iterations\n plt.clf()\n plt.plot(list(fobj_avg.keys()), list(fobj_avg.values()), 'mo--', linewidth=2)\n plt.xlabel('Iterations')\n plt.ylabel('Average objective function value')\n plt.xticks(list(range(0, 20)), list(range(1, 21)))\n plt.show()\n\n plt.clf()\n plt.plot(accuracy_list, 'rs--', linewidth=2)\n plt.xticks(list(range(0, 20)), list(range(1, 21)))\n plt.xlabel('Iterations')\n plt.ylabel('Accuracy')\n plt.show()", "def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, color='#aa0000',\n linewidth=2, 
label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))", "def cross_validation_visualization_accuracy(epochs, accs, save=False, filename=\"cross_validation_acc\"):\n plt.plot(epochs, accs, marker=\".\", color='r', label='accuracy')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def save_accuracy_chart(self):\n history = self.model.history.history\n fig = plt.figure()\n plt.plot(history['accuracy'], label='Training Accuracy')\n plt.plot(history['val_accuracy'],label='Validation Set Accuracy')\n plt.legend()\n fig.savefig('model_accuracy.png')", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def plt_accuracy(self, lamda_choice, weights_initial):\n weights_random = []\n train_accuracy = []\n test_accuracy = []\n for i in range(len(lamda_choice)):\n self.lamda = lamda_choice[i]\n self.fit(weights_initial)\n pred_result = self.predict()\n test_ac = self.evaluate(pred_result)\n test_accuracy.append(test_ac)\n train_ac = self.train_accuracy() \n train_accuracy.append(train_ac)\n weights_random.append(self.w_result[-1])\n # print best lamda with highest accuracy\n print(\"choose lambda: %f\" % lamda_choice[np.argmax(train_accuracy)])\n labels = [\"Train_accuracy\", \"Test_accuracy\"]\n fig, ax = plt.subplots()\n ax.plot(lamda_choice, train_accuracy, 'o-', label='Train_accuracy')\n ax.plot(lamda_choice, test_accuracy, 'o-', label='Test_accuracy')\n # Draw absolute weight value corresponding the random feature\n fig, ax = plt.subplots()\n ax.plot(lamda_choice, weights_random, label='Weight for random')\n plt.show()\n # find the best lamda\n self.lamda = lamda_choice[np.argmax(train_accuracy)]", "def plot_metric_values(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.accuracies), 1)\n plt.plot(epochs_range, self.accuracies[threshold:], color='red', marker='o')\n plt.title('Accuracy on test data. 
Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.grid(True)\n plt.show()", "def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)", "def acc_loss_graph(self):\n acc = self.history['accuracy']\n val_acc = self.history['val_accuracy']\n loss = self.history['loss']\n val_loss = self.history['val_loss']\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 2, 1)\n plt.plot(acc, label='Train')\n plt.plot(val_acc, label='Val')\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.ylim([min(plt.ylim()), 1])\n plt.title('Accuracy')\n\n plt.subplot(1, 2, 2)\n plt.plot(loss, label='Train')\n plt.plot(val_loss, label='Val')\n plt.legend(loc='lower right')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.ylim([0, max(plt.ylim())])\n plt.title('Loss')\n plt.show();", "def plot_acc (history, acc='acc', val_acc='val_acc'):\n \n history_dict = history.history\n acc = history_dict[acc]\n val_acc = history_dict[val_acc]\n loss_values = history_dict['loss']\n epochs = range(1, len(loss_values) + 1)\n\n plt.plot (epochs, acc, 'bo', label='Training accuracy')\n plt.plot (epochs, val_acc, 'b', label=\"validation accuracy\")\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.show()", "def test_network(self):\n train_accuracy = 100 - percentError(map(self.neural_result,\n self.train_inputs),\n self.train_outputs)\n print 'Train accuracy:', train_accuracy\n\n test_accuracy = 100 - percentError(map(self.neural_result,\n self.test_inputs),\n self.test_outputs)\n print 'Test accuracy:', test_accuracy\n\n print '#' * int(train_accuracy), 'TR'\n print '#' * int(test_accuracy), 'TE'", "def show_accuracy(self):\r\n return round(accuracy_score(self.actual, self.predicted),2)", "def plot_on_ax(ax, trn_ls, val_ls, ylabel=\"Accuracy\"):\n ax.plot(trn_ls, 'o-', label='Training')\n ax.plot(val_ls, 'x-', label='Validation')\n ax.set_xlabel('Epochs')\n ax.set_ylabel(ylabel)\n ax.legend()", "def visualize_train_history(history):\n cat_acc = history.history['categorical_accuracy']\n val_cat_acc = history.history['val_categorical_accuracy']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(1, len(cat_acc) + 1)\n\n plt.plot(epochs, cat_acc, 'bo', label='Training cat_acc')\n plt.plot(epochs, val_cat_acc, 'b', label='Validation cat_acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n\n plt.figure()\n\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n\n plt.show()", "def see_evaluation(epoch, training_acc, test_acc):\n print (\"Epoch \", epoch, \"Training acc: \", training_acc*100, \"Test acc: \", test_acc*100)", "def accuracy(self):", "def visualize_training(features, labels, pl):\n print(\"Visualizing training\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Take out each feature type, one at a time\n label_map = get_label_map(labels)\n\n for key in label_map.keys():\n like_ind = label_map[key]\n like_data = np.array([features[i] for i in like_ind])\n\n plt.scatter(like_data[:,0],like_data[:,1],label=key)\n\n # get limits\n xmin = features.column_min(0) - .5\n xmax = features.column_max(0) + .5\n ymin = features.column_min(1) - .5\n ymax = features.column_max(1) + .5\n\n plt.xlim(xmin,xmax)\n plt.ylim(ymin,ymax)\n\n # Track the current dividing line, as well as 
the number of epochs passed\n divider, = plt.plot([],[])\n epoch_tracker = plt.text(-1,.9, '', fontsize=15)\n\n def update(i):\n \"\"\"\n 1.) Get the next set of weights from the tracker\n 2.) Calculate and draw the new divider line\n 3.) Update the epoch counter\n 4.) If we are at the end of an epoch, plot a dashed divider line to track progress\n \"\"\"\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider\n\n ani = animation.FuncAnimation(fig, update, frames=range(len(pl.weights_tracker)), interval=250,repeat=False)\n plt.legend()\n\n # optional save file\n if len(sys.argv) >= 3 :\n ani.save(sys.argv[2], writer='imagemagick', fps=5)\n\n plt.show()", "def plot_eval_3(trained_model, X_val, y_val, image_name):\n # FOR EACH CLASS\n # val_pred = trained_model.predict_proba(X_val, num_iteration=iteration)\n \n iterations = trained_model.booster_.current_iteration()\n# results = np.zeros((2, iterations))\n results = np.zeros((iterations,))\n for pos in range(iterations):\n \n # Calculate the current iteration (from 1 to iterations)\n iteration = pos + 1\n \n # Predict validation set for the current iteration\n# start_time = timeit.default_timer()\n val_pred = trained_model.predict(X_val, num_iteration=iteration)\n# end_time = timeit.default_timer()\n# time = end_time - start_time\n# speed = int(X_val.shape[0] / time)\n \n # Number of hits\n val_ok = (val_pred == y_val)\n \n # Percentage of hits\n val_acc = val_ok.sum() / val_ok.size\n \n # Actualize data for plotting results\n# results[0][pos] = time\n# results[1][pos] = val_acc\n results[pos] = val_acc\n \n # Generate accuracy plot\n plt.figure()\n# plt.plot(results[0], results[1], 'b')\n plt.plot(results, 'b')\n plt.title('Validation accuracy')\n plt.xlabel('iterations')\n plt.ylabel('accuracy')\n plt.legend()\n \n # Save validation plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_val_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')", "def accuracy_plot(LS_sizes, data_fun):\r\n\r\n opt_neigh = []\r\n\r\n #plot of optimal n_neighbors as a function of the LS size\r\n\r\n for size in LS_sizes:\r\n\r\n acc = []\r\n neighbors_values = np.arange(1,size+1,1)\r\n\r\n # For a given LS size, plots of accuracy(n_neighbors)\r\n\r\n for value in neighbors_values:\r\n\r\n X_train, y_train, X_test, y_test = data_fun(n_ts=500, n_ls=size)\r\n\r\n clf = KNeighborsClassifier(n_neighbors = value)\r\n clf = clf.fit(X_train, y_train)\r\n acc.append(clf.score(X_test,y_test))\r\n\r\n plt.figure()\r\n plt.plot(neighbors_values,acc, '.')\r\n plt.title(\"Evolution of accuracy as a function \\nof n_neighbors for LS_size = {} samples, for {}.\".format(size, data_fun.__name__))\r\n plt.savefig(\"acc(n_neigh)_{}_{}.pdf\".format(size, data_fun.__name__))\r\n\r\n opt_neigh.append(np.argmax(acc)+1)\r\n\r\n plt.figure()\r\n plt.plot(LS_sizes, opt_neigh, '.')\r\n plt.title(\"Optimal n_neighbors as a function \\nof the size of the learning sample, for {}.\".format(data_fun.__name__))\r\n plt.savefig(\"opt_n_neigh(LS_size)_{}.pdf\".format(data_fun.__name__))" ]
[ "0.80476934", "0.7662797", "0.7526127", "0.73480844", "0.73410636", "0.7286691", "0.7255891", "0.71645975", "0.7152331", "0.71372527", "0.71316504", "0.7096229", "0.70912606", "0.70813125", "0.70319337", "0.7012254", "0.69639987", "0.6930249", "0.69032884", "0.6887325", "0.68695694", "0.68587494", "0.68570805", "0.6818889", "0.6803549", "0.6797932", "0.67881614", "0.6787629", "0.67847496", "0.6753191" ]
0.78214025
1
Get the data labels that correspond to the data samples
def data_labels(data):
    # The data consists of a equal number of benign and deleterious samples
    # The first part of the data are the benign samples (label 0), and the second part the deleterious ones (label 1)
    n_samples = data.shape[0]
    n_class_samples = int(n_samples / 2)
    # Get a numpy array of the labels
    labels_ben = [0] * n_class_samples
    labels_del = [1] * n_class_samples
    labels = np.array(labels_ben + labels_del)
    # Convert the data into one hot encoded data
    labels = initialization_based(labels)

    return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_labels(data):\n\n # The data consists of a equal number of benign and deleterious samples\n # The first part of the data are the benign samples (label 0), and the second part the deleterious ones (label 1)\n n_samples = data.shape[0]\n n_class_samples = int(n_samples / 2)\n\n # Get a numpy array of the labels\n labels_ben = [0] * n_class_samples\n labels_del = [1] * n_class_samples\n labels = np.array(labels_ben + labels_del)\n\n # Create float numbers for the labels\n labels = labels.astype(float)\n\n # Convert the data into a numpy array\n # One hot encoded vector is not necessary, because the data is binary\n labels = np.reshape(labels, [-1, 1])\n\n return labels", "def get_labels(self) -> np.ndarray:\n return self._dataset.get_labels()[self._ids]", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_labels(df):\n labels = []\n for i in df.index:\n label = sample_label_from_sample_name(i)\n labels.append(label)\n return labels", "def get_labels(self) -> np.ndarray:\n if self.i - 1 >= self.k:\n logging.error(\"No more training iterations!!\")\n return np.array([])\n _, test = self.indexes[self.i - 1]\n return self.labels.take(test, axis=-1)", "def labels_(self) -> DNDarray:\n return self._labels", "def get_labels(self):\n\n labels = list(self.meta_data[self.target_column])\n\n return labels", "def get_train_labels(self):\n raise NotImplementedError", "def get_labels(self):\n return self.labels", "def get_labels(self):\n return self.labels[1:]", "def labels(self):\n return self._get_labels(self.label_vector)", "def get_labels(self):\n raise NotImplementedError", "def labels(self) -> ndarray:\n return self._labels", "def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = [0, 1, 2, 3, 4]\n print(len(x_text))\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']] \n label[datasets['target'][i]] = labels[i]\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]", "def labels(self):\n return self._labels", "def _extract_labels(self, samples: List):\n targets = [\n self.sp_model.encode(sample[2].lower().replace(\"<unk>\", \"<garbage>\").replace(\"\\n\", \"\"))\n for sample in samples\n ]\n targets = [\n [ele if ele != 4 else self.sp_model.unk_id() for ele in target] for target in targets\n ] # map id of <unk> token to unk_id\n lengths = torch.tensor([len(elem) for elem in targets]).to(dtype=torch.int32)\n targets = torch.nn.utils.rnn.pad_sequence(\n [torch.tensor(elem) for elem in targets],\n batch_first=True,\n padding_value=1.0,\n ).to(dtype=torch.int32)\n return targets, lengths", "def get_labels(self):\n return []", "def labels_available(self):\n return self.training_labels.dtype.names", "def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = []\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']]\n #print('target={}, i={}'.format(datasets['target'], i))\n label[datasets['target'][i]] = 1\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]", "def generate_labels():\n label_set = set([])\n for data in load_data():\n label = data.split(' ', 1)[0]\n label_set.add(label)\n labels = list(label_set)\n labels.sort()\n return labels", "def get_labels(self):\n return [token.label for token in self.tokens]", "def 
train_labels(self):\n return self._train_labels", "def _get_labels(self, ind):\n pass", "def labels(self) -> List[str]:\n\n return list(self.t0.keys())", "def labels(self) -> pd.Series:\n return self.data.apply(to_label, axis=1)", "def get_target_labels(self):\n id_labels = np.zeros_like(self.id_uncertainty_measures[UncertaintyMeasuresEnum.CONFIDENCE])\n ood_labels = np.ones_like(self.ood_uncertainty_measures[UncertaintyMeasuresEnum.CONFIDENCE])\n return np.concatenate((id_labels, ood_labels), axis=0) # row wise concatenation", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels" ]
[ "0.77301323", "0.76840097", "0.7593941", "0.7593941", "0.758966", "0.7566302", "0.75237405", "0.7513252", "0.7457496", "0.74083877", "0.7381961", "0.7372222", "0.7349228", "0.7336654", "0.7326944", "0.73216856", "0.7282346", "0.72785324", "0.7277561", "0.72750634", "0.7268131", "0.720792", "0.7191111", "0.7176616", "0.7171157", "0.7168967", "0.7166684", "0.7157871", "0.7157871", "0.7157871" ]
0.7718853
1
Return a heatmap for metrics that target single qubits.
def heatmap(self, key: str) -> vis.Heatmap:
    metrics = self[key]
    assert all(len(k) == 1 for k in metrics.keys()), (
        'Heatmaps are only supported if all the targets in a metric'
        ' are single qubits.')
    assert all(len(k) == 1 for k in metrics.values()), (
        'Heatmaps are only supported if all the values in a metric'
        ' are single metric values.')
    value_map = {qubit: value for (qubit,), (value,) in metrics.items()}
    return vis.Heatmap(value_map)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heatmap(\n df: pd.DataFrame, figsize: tuple = (10, 10), scale: float = 1.4\n) -> sns.heatmap:\n\n # keep only quantitative features\n df = create_quanti_df(df)\n print(f\"Number of quantitaive columns: {df.shape[1]}\")\n # calcaulate features correlations\n corr = df.corr() * 100\n # create mask for the upper triangle\n mask = np.triu(np.ones_like(corr, dtype=bool))\n # create figure\n plt.figure(figsize=figsize)\n # create heatmap\n sns.set(font_scale=scale)\n sns.heatmap(corr, mask=mask, annot=True, cmap=\"coolwarm\", fmt=\".0f\")\n sns.reset_orig()\n plt.show()\n return", "def heatmap(self):\n if not self._heatmap:\n self.fill_heatmap()\n return self._heatmap", "def heatmap_from_data_frame(df, metric, rows=[\"Method\", \"Parameters\"],\n cols=[\"Dataset\"], vmin=0, vmax=1, cmap='Reds'):\n df = df.pivot_table(index=rows, columns=cols, values=metric)\n df.sort_index()\n\n height = len(df.index) * 0.35\n width = len(df.columns) * 1\n\n ax = plt.figure(figsize=(width, height))\n ax = heatmap(df, cmap=cmap, linewidths=0, square=True, vmin=vmin,\n vmax=vmax)\n\n ax.set_title(metric, fontsize=20)\n\n plt.show()\n\n return ax", "def heatmap_chart(df, title=\"\"):\r\n source = df.copy()\r\n source = source.reset_index()\r\n source = pd.melt(source, id_vars=\"index\", value_vars=df.columns)\r\n source.columns = [\"m1\", \"m2\", \"value\"]\r\n\r\n base = alt.Chart(source).encode(\r\n alt.X('m1:O', title=\"New Model\"),\r\n alt.Y(\"m2:O\", title=\"Baseline Model\"),\r\n ).properties(\r\n width=500,\r\n height=400,\r\n title=title,\r\n )\r\n rects = base.mark_rect().encode(\r\n color='value:Q',\r\n )\r\n text = base.mark_text(\r\n align='center',\r\n baseline='middle',\r\n color='black',\r\n size=12,\r\n dx=0,\r\n ).encode(\r\n text='value:Q',\r\n )\r\n return rects + text", "def generate_heatmap(self):\n data = []\n for j in range(len(self.generations[0])):\n row = []\n for i in range(len(self.generations)):\n row.append(self.generations[i][j].fitness)\n data.append(row)\n data = np.array(data)\n\n plt.figure()\n ax = sns.heatmap(\n data,\n cmap='RdBu',\n xticklabels=2,\n yticklabels=2)\n\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Individual', **hfont)\n ax.invert_yaxis()\n ax.axhline(linewidth=4, color='black')\n ax.axvline(linewidth=4, color='black')\n ax.collections[0].colorbar.set_label('Fitness')\n plt.savefig('figures/Voltage Clamp Figure/Single VC Optimization/'\n 'heatmap.svg')", "def get_heatmap(self, heatmap_name=None):\n return self.prob", "def heatmap(df, cmap ='RdBu' ):\n\n # TODO: mpld3 does not display axis labels properly\n\n # TODO: Replace with an interactive plot, see bokeh:\n # http://bokeh.pydata.org/docs/gallery/les_mis.html\n\n fig, ax = plt.subplots()\n data = df.as_matrix()\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n\n ax.pcolor(data, cmap = cmap)\n ax.set_xticks(np.arange(data.shape[1])+0.5, minor = False)\n ax.set_xticklabels(df.columns)\n \n ax.set_yticks(np.arange(data.shape[0])+0.5, minor = False)\n ax.set_yticklabels(df.index)\n ax.invert_yaxis()\n ax.xaxis.tick_top()\n\n return fig", "def plot_lut(df, output):\n flatui = [\"#3d77d4\", \"#f0b05d\"]\n fig, ax = plt.subplots(figsize=(18,10))\n p = sns.heatmap(df, linewidths=0.1, annot=False, cbar=True, \n ax=ax, cmap=sns.color_palette(flatui), \n cbar_kws={'orientation': 'vertical',\n 'label': 'class'})\n\n colorbar = p.collections[0].colorbar\n colorbar.set_ticks([0.25, 0.75])\n colorbar.set_ticklabels(['0', '1'])\n\n plt.title('2D Look-Up Table')\n 
plt.xlabel('binned cluster width')\n plt.ylabel('binned tau')\n plt.savefig(output)", "def visualizeC(self, M=None):\n try:\n import seaborn as sns\n import matplotlib.pyplot as plt\n except:\n print(\"Seaborn or matplotlib not imported...can't build the heatmap\")\n if M is None:\n M = self.stateC\n a = torch.argmax(M, dim=0)\n print(self.find_TPname(a))\n M = pd.DataFrame(M.numpy(), index=list(\n self.filler2index.keys()), columns=list(self.role2index.keys()))\n sns.heatmap(M, annot=True, cmap=\"Blues\")\n plt.show()", "def generate_heatmap(self):\n data = []\n for j in range(len(self.generations[0])):\n row = []\n for i in range(len(self.generations)):\n row.append(self.generations[i][j].fitness)\n data.append(row)\n data = np.array(data)\n\n # Display log error in colorbar.\n tick_range = range(\n math.floor(math.log10(data.min().min())),\n 1 + math.ceil(math.log10(data.max().max())))\n cbar_ticks = [math.pow(10, i) for i in tick_range]\n log_norm = LogNorm(vmin=data.min().min(), vmax=data.max().max())\n\n plt.figure(figsize=(10, 5))\n ax = sns.heatmap(\n data,\n cmap='viridis',\n xticklabels=2,\n yticklabels=2,\n norm=log_norm,\n cbar_kws={'ticks': cbar_ticks, 'aspect': 15})\n\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Individual', **hfont)\n plt.xticks(\n [i for i in range(0, self.config.max_generations, 5)],\n [i for i in range(0, self.config.max_generations, 5)])\n plt.yticks(\n [i for i in range(0, self.config.population_size, 5)],\n [i for i in range(0, self.config.population_size, 5)])\n\n ax.invert_yaxis()\n ax.collections[0].colorbar.set_label('Error')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.savefig('figures/Parameter Tuning Figure/heatmap.svg')", "def plot_heatmap(mi, output):\n\tfig = plt.figure()\n\tdata = np.array(mi)\n\tfig, ax = plt.subplots()\n\theatmap = ax.pcolor(data, cmap=plt.cm.jet)\n\n\tax.invert_yaxis()\n\tax.xaxis.tick_top()\n\n\tax.set_xlabel('Seq 2')\n\tax.set_ylabel('Seq 1')\n\tax.xaxis.set_label_position('top')\n\n\tax.set_xlim(0, len(mi[0]))\n\tax.set_ylim(len(mi), 0)\n\n\txmajor_ticks = np.arange(0, len(mi[1]), 20)\n\txminor_ticks = np.arange(0, len(mi[1]), 1)\n\n\tymajor_ticks = np.arange(0, len(mi), 20)\n\tyminor_ticks = np.arange(0, len(mi), 1)\n\n\tax.tick_params(axis = 'both', which = 'major', labelsize = 5)\n\tax.tick_params(axis = 'both', which = 'minor', labelsize = 0)\n\n\tax.set_xticks(xmajor_ticks)\n\tax.set_xticks(xminor_ticks, minor = True)\n\tax.set_yticks(ymajor_ticks)\n\tax.set_yticks(yminor_ticks, minor = True)\n\n\tax.tick_params(which = 'both', direction = 'out')\n\n\tplt.xticks(rotation=90)\n\n\tcb = plt.colorbar(heatmap)\n\tcb.set_label('MI value')\n\n\tfig.savefig(output, dpi = 700)", "def get_heatmap(self, heatmap_name=None):\n return self.tracker.get_heatmap()", "def get_heatmap(self, heatmap_name=None):\n return self.tracker.get_heatmap()", "def generateHeatmap(title, unit, labels, sizeValues, xAxisName, legendPos, timeUnit):\n fig = preparePlot(title)\n ax = fig.axes[0]\n impls = sorted(list(sizeValues.keys()), key=cmp_to_key(compareFn))\n yposns = [val for (discard, val) in [extractDigits(impl) for impl in impls]]\n (yAxisName, discard) = extractDigits(impls[0])\n data = [sizeValues[k] for k in impls]\n nonesToNans(data)\n\n if False:\n print(\n \"Title: \",\n title,\n \"\\nunit: \",\n unit,\n \"\\nlabels:\",\n labels,\n \"\\nsizeValues: \",\n sizeValues,\n )\n print(\"impls: \", impls)\n print(\"yAxisName: \", yAxisName)\n print(\"unit: \", 
unit)\n print(\"timeUnit: \", timeUnit)\n print(\"data: \", data)\n\n # Do most of the work!\n im = ax.imshow(data, cmap=plt.get_cmap(\"plasma\"))\n ax.set_xlabel(xAxisName)\n ax.set_ylabel(yAxisName)\n # We want to show appropriate ticks\n # ... and label them with the respective list entries\n (labels, majorTicks, minorTicks) = generateLabelsTicks(labels)\n # print (\"labels: \", labels, \"\\nmajorTicks: \",majorTicks,\"\\nminorTicks: \",minorTicks)\n ax.set_xticks(majorTicks)\n ax.set_xticklabels(labels)\n if minorTicks:\n ax.set_xticks(minorTicks, minor=True)\n\n (labels, majorTicks, minorTicks) = generateLabelsTicks(yposns)\n ax.set_yticks(majorTicks)\n ax.set_yticklabels(labels)\n if minorTicks:\n ax.set_yticks(minorTicks, minor=True)\n\n # Add a colorbar\n cBar = plt.colorbar(im)\n finalisePlot(cBar.ax, title, None, fig, \"_map\", timeUnit)", "def heatmap(relevance_scores_nchw: torch.Tensor,\n width: int = 4,\n height: int = 4) -> None:\n pf.sanity_checks.ensure_nchw_format(relevance_scores_nchw)\n # Convert each heatmap from 3-channel to 1-channel.\n # Channel dimension is now omitted.\n r_nhw = relevance_scores_nchw.sum(dim=1)\n\n # Loop over relevance scores for each image in batch\n for r_hw in r_nhw:\n # Use Tensor.cpu() to copy the tensor to host memory before converting to numpy().\n plot.heatmap(r_hw.cpu().detach().numpy(), width, height)", "def heatmap(mat, x_label=None, y_label=None, axes=None,\n title=None, save=False):\n sns.heatmap(mat)\n plt.show()", "def matplotlib_heatmap_chart() -> Tuple:\n df = read_dataset(Path('..', '..', 'iris.csv'))\n df.drop(\"species\", axis=1, inplace=True)\n # Default is pearson's correlation coefficient\n corr_df = df.corr()\n\n fig, ax = a_libraries.matplotlib_heatmap_chart(corr_df.values)\n\n return fig, ax", "def get_heightmap_fig(self):\n x = self.heightmap.data.cpu().numpy()\n plt.figure()\n plt.imshow(x)\n plt.colorbar()\n fig = plt.gcf()\n return fig", "def heatmap(self, *args, **kwargs):\n obj = self.pcolormesh(*args, **kwargs)\n xlocator, ylocator = None, None\n if hasattr(obj, '_coordinates'):\n coords = obj._coordinates\n coords = (coords[1:, ...] 
+ coords[:-1, ...]) / 2\n coords = (coords[:, 1:, :] + coords[:, :-1, :]) / 2\n xlocator, ylocator = coords[0, :, 0], coords[:, 0, 1]\n self.format(\n xgrid=False, ygrid=False, xtickminor=False, ytickminor=False,\n xlocator=xlocator, ylocator=ylocator,\n )\n return obj", "def timeit_heatmap(data, xlabel='xlabel', ylabel='ylabel', **kwargs):\n dataT = {}\n figs = []\n series = kwargs.get('series', (0,1))\n cmap = kwargs.get('cmap', cm.coolwarm)\n for k, v in data.items():\n dataT[k] = zip(*v)\n X, Y, Z = dataT[k][series[0]], dataT[k][series[1]], dataT[k][-1]\n left, right = min(X), max(X)\n bottom, top = min(Y), max(Y)\n extent = [left, right, bottom, top]\n wide, tall = (max(X)-min(X)+1), (max(Y)-min(Y)+1)\n intervalX = max(X) - min(heapq.nlargest(2,set(X)))\n intervalY = max(Y) - min(heapq.nlargest(2,set(Y)))\n if intervalX > 1: \n wide = 1 + wide/intervalX\n else:\n wide = 1\n if intervalY > 1: \n tall = 1 + tall/intervalY\n else: \n tall = 1\n # TODO: BUG: fix so that Z transposes with x & y series reversed\n Z = np.reshape(Z, [wide, tall])\n Z = list(zip(*Z)) # Z is transposed\n Z = [i for i in Z[::-1]] # Z is upside down\n fig, ax = plt.subplots()\n hmap = ax.imshow(Z, extent=extent, cmap=cmap, interpolation='nearest')\n fig.colorbar(hmap).set_label(\"time\")\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(substitute_titles(k,series))\n figs.append(fig)\n return figs", "def show_heatmap(\n self,\n value,\n name,\n normalize_method=\"separated\",\n cmap=\"ValueFunction-New\",\n title=None,\n nrows=1,\n ncols=1,\n index=1,\n scale=1.0,\n legend=True,\n ticks=True,\n colorbar=False,\n notext=False,\n cmap_vmin=MIN_RETURN,\n cmap_vmax=MAX_RETURN,\n ):\n if len(value.shape) == 1:\n value = value.reshape(self.rows, self.cols)\n\n key = name, index\n\n if key not in self.heatmap_ax:\n scale_x = np.sqrt(ncols / nrows)\n scale_y = np.sqrt(nrows / ncols)\n with with_scaled_figure(scale_x * scale, scale_y):\n self._init_heatmap_vis(\n name, cmap, nrows, ncols, index, legend, ticks, cmap_vmin, cmap_vmax\n )\n if title is not None:\n self.heatmap_ax[key].set_title(title)\n\n if colorbar:\n cbar = self.heatmap_ax[key].figure.colorbar(\n self.heatmap_img[key], ax=self.heatmap_ax[key]\n )\n cbar.ax.set_ylabel(\"\", rotation=-90, va=\"bottom\")\n\n coords = self._normalize_value(\n value,\n method=normalize_method,\n cmap_vmin=cmap_vmin,\n cmap_vmax=cmap_vmax,\n )\n self.heatmap_img[key].set_data(value * self._map_mask())\n\n if not colorbar and not notext:\n self._reset_texts(self.heatmap_texts[key])\n for r, c, ext_v in coords:\n self._text_on_cell(\n c, r, ext_v, self.heatmap_texts[key], self.heatmap_ax[key]\n )\n self.heatmap_fig[name].canvas.draw()\n\n return key", "def create_heatmap(df):\n\n fig = go.Figure(data=go.Heatmap(\n z=df.values.tolist(),\n x=df.columns,\n #y=[classifier for classifier in df.index.values.tolist()],\n y = df.index.values.tolist(),\n hoverongaps = False,\n xgap = 3,\n ygap = 3,\n colorscale=[[0.0, 'rgb(165,0,38)'], [0.1111111111111111, 'rgb(215,48,39)'], [0.2222222222222222, 'rgb(244,109,67)'], [0.3333333333333333, 'rgb(253,174,97)'], [0.4444444444444444, 'rgb(254,224,144)'], [0.5555555555555556, 'rgb(224,243,248)'], [0.6666666666666666, 'rgb(171,217,233)'], [0.7777777777777778, 'rgb(116,173,209)'], [0.8888888888888888, 'rgb(69,117,180)'], [1.0, 'rgb(49,54,149)']]\n ),\n )\n return fig", "def draw_heatmap_of_subset(cnts, info, genes, names, col_id='case',\n cases= [\"Case12\", \"Case7\", \"Case11\", \"Case8\"], subset_name=\"test\",\n draw=False, fs=(4,4), 
my_cmap=''):\n assert col_id in info.columns\n info = info[info[col_id].isin(cases)]\n data = info.merge(cnts.T, left_index=True, right_index=True)\n subset_means = []\n gene_means = []\n groups = data.groupby(col_id)\n for case in cases:\n df = groups.get_group(case)\n gene_means.append(pd.Series(df[genes].mean(), name=names[case]))\n subset_means.append((names[case], df[genes].mean().mean()))\n subset_df = pd.DataFrame.from_records(subset_means, columns=[\"Sample\", subset_name], index=\"Sample\")\n subset_df.index.name=\"\"\n gene_df = pd.concat(gene_means, axis=1)\n if draw:\n fig = plt.figure(figsize=fs)\n s = sns.heatmap(np.log2(gene_df + 1), cmap=my_cmap, linewidths=0.5, linecolor='black',\n cbar_kws={'label': 'Log2 TPMs'})\n return fig, ''\n return gene_df, subset_df", "def create_heatmap(num_maps, height, width, all_joints, sigma, stride):\n heatmap = np.zeros((height, width, num_maps), dtype=np.float64)\n\n for joints in all_joints:\n for plane_idx, joint in enumerate(joints):\n if joint:\n _put_heatmap_on_plane(heatmap, plane_idx, joint, sigma, height, width, stride)\n\n # background\n heatmap[:, :, -1] = np.clip(1.0 - np.amax(heatmap, axis=2), 0.0, 1.0)\n\n return heatmap", "def plot_heatmap(mi):\n\tfig = plt.figure()\n\tdata = np.array(mi)\n\tfig, ax = plt.subplots()\n\theatmap = ax.pcolor(data, cmap=plt.cm.jet)\n\n\tax.tick_params(direction='out')\n\n\tmajorLocator = MultipleLocator(20)\n\tmajorFormatter = FormatStrFormatter('%d')\n\tminorLocator = MultipleLocator(1)\n\n\tax.xaxis.set_major_locator(majorLocator)\n\tax.xaxis.set_major_formatter(majorFormatter)\n\tax.xaxis.set_minor_locator(minorLocator)\n\n\tax.yaxis.set_major_locator(majorLocator)\n\tax.yaxis.set_major_formatter(majorFormatter)\n\tax.yaxis.set_minor_locator(minorLocator)\n\n\tax.invert_yaxis()\n\tax.xaxis.tick_top()\n\n\t###check which seq belongs to each axe\n\tax.set_xlabel('Seq 2')\n\tax.set_ylabel('Seq 1')\n\n\tax.set_xlim(0, len(mi[1]))\n\tax.set_ylim(len(mi), 0)\n\n\tplt.xticks(rotation=90)\n\n\tcb = plt.colorbar(heatmap)\n\tcb.set_label('MI value')\n\n\t#pdf = PdfPages('heatmap.pdf')\n\t#pdf.savefig(fig)\n\tfig.savefig('heatmap.png')\n\t#pdf.close()", "def generate_heatmap_extract(self, bbox):\n return heatmaps[0].extract(bbox) # full extact", "def plotHeatmap(inputRunMatrix, tick_label, output_folder):\n\t# heatmap of run sim matrix\n\tinputRunMatrix = np.sqrt(inputRunMatrix)\n\tvmax = np.percentile(inputRunMatrix,95)\n\tvmin = np.amin(inputRunMatrix)\n\t\n\tfig,ax = plt.subplots()\n\tax = sns.heatmap(inputRunMatrix,vmin=vmin,vmax=vmax, \\\n xticklabels=tick_label,yticklabels=tick_label)\n\n\t# square the color bar tick label to undo sqrt of sim matrix\n\tc_bar = ax.collections[0].colorbar\n\tticLoc = c_bar.get_ticks()\n\tnewTic = [int(x*x) for x in ticLoc]\n\tc_bar.set_ticks(ticLoc)\n\tc_bar.set_ticklabels(newTic)\n\n\tplt.tight_layout()\n\tfig.savefig(output_folder + \"/heatmap.png\")\n\tplt.close(fig)", "def heatmap_visualization(embeddings, topics, top_n_words, width = 800, height = 800):\n topics_list = topics\n topics_lookup = {topic:i for i, topic in enumerate(topics_list)}\n indices = np.array([topics_lookup[topic] for topic in topics_list])\n embeddings = embeddings[indices]\n distance_matrix = cosine_similarity(embeddings)\n\n named_labels = [[[str(topic), None]] + top_n_words[topic] for topic in topics_list]\n named_labels = [\"_\".join([label[0] for label in labels[:4]]) for labels in named_labels]\n named_labels = [label if len(label) < 30 else label[:27] + \"...\" for label in 
named_labels]\n\n fig = px.imshow(distance_matrix, labels=dict(color=\"Similarity Score\"), x=named_labels, y=named_labels, color_continuous_scale='GnBu'\n )\n\n fig.update_layout(\n title={\n 'text': \"<b>Indeed Article's Similarity Matrix\", 'y': .95, 'x': 0.55, 'xanchor': 'center', 'yanchor': 'top', 'font': dict(size=22,color=\"Black\")\n },\n width=width,\n height=height,\n hoverlabel=dict(\n bgcolor=\"white\",\n font_size=16,\n font_family=\"Rockwell\"\n ),\n )\n fig.update_layout(showlegend=True)\n fig.update_layout(legend_title_text='Trend')\n \n return fig, distance_matrix", "def get_heatmap(self, X):\n if not self._learned:\n raise ValueError(\"SOM not trained yet\")\n\n res = {}\n for item in X:\n winner = self.winner(item)\n key = winner[0]\n\n if key in res:\n res[key][0] += 1\n res[key][1].append(item)\n else:\n res[key] = [1, [item], winner[1]]\n\n heatmap = np.zeros((self._m, self._n), dtype=np.int)\n for key, value in res.items():\n heatmap[value[2][0], value[2][1]] = int(value[0])\n\n return heatmap", "def heat_matrix (m, caption, ticks_labels_x, ticks_labels_y, colormap):\n\n plt.matshow (m, fignum = 0, aspect = 'auto', cmap = colormap[0], norm = colormap[1])\n plt.colorbar ()\n\n plt.xticks (ticks_labels_x[0], ticks_labels_x[1], rotation='vertical')\n plt.yticks (ticks_labels_y[0], ticks_labels_y[1])\n axes = plt.gca ()\n axes.tick_params (direction = 'out', pad = 5)\n\n plt.title (caption, y = 20.0)" ]
[ "0.60673547", "0.5961135", "0.5927924", "0.5823532", "0.5730508", "0.5681778", "0.5651682", "0.5618253", "0.5593625", "0.55592394", "0.54876494", "0.54307234", "0.54307234", "0.53738236", "0.5353844", "0.53439945", "0.53200763", "0.53121907", "0.5298264", "0.5285847", "0.52782667", "0.5246733", "0.5243794", "0.52349377", "0.5227022", "0.5210405", "0.51920956", "0.5182058", "0.5175929", "0.5175558" ]
0.7633295
0
Return the maximum flux
def max_flux(self): return np.max(self.flux)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_flux(frame):\n return np.max(frame.fluxes[frame.radii <= max_extent_px])", "def max_time(self):\n return self.time[np.argmax(self.flux)]", "def max(self) -> \"Stream[float]\":\n return self.agg(np.max).astype(\"float\")", "def maximum(x):\n return np.maximum(x, 0)", "def max(self):\n return numpy.ma.max(self.data)", "def max_gain(self):\n return np.max(self.fr)", "def lambda_max(self):\n return const.b_wien / self.temperature", "def find_max_f():\n fmax = fmin(g, 2)\n return fmax[0]", "def max(self):\n return self._reduce_for_stat_function(F.max, only_numeric=False)", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def range_flux(self):\n return self.max_flux - self.min_flux", "def getMagFlux(self):\n return self.magflux", "def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )", "def get_max(self):\n return self.serie.max()", "def f_max(cls):\n return cls.params[\"f_max\"]", "def maxx(self):\n return self.__maxx", "def x_max(self):\n return self.get_max_value(self.X_INDEX)", "def get_max_value(self):\n if self.is_ready():\n max_values = [dnd.get_max_value() for dnd in self.dnds]\n max_value = max(max_values)\n else:\n max_value = torch.tensor([[0.0]], dtype=torch.float)\n return max_value", "def get_max_value(self):\n max_value = max(self.values)\n return max_value", "def max(self) -> FrameLike:\n return super().max()", "def max(self) -> FrameLike:\n return super().max()", "def max(self) -> FrameLike:\n return super().max()", "def max(self) -> FrameLike:\n return super().max()", "def max(self):\n\n maximum = -float('inf')\n\n for i in range(self.sum.GetNbinsX()):\n bin_max = self.sum.GetBinContent(i+1) + self.sum.GetBinError(i+1)\n if bin_max > maximum:\n maximum = bin_max\n\n return maximum", "def Max(data):\n return data.max()", "def _get_maximum(self):\n return self._maximum", "def max(self):\r\n return np.max(self.data_array)", "def min_flux(self):\n return np.min(self.flux)", "def lmax(self):\n cond = (self.transmit / self.transmit.max()) > 1./100\n return max(self.wavelength[cond])", "def zmax(self):\n # Extract parameters\n pzs = self.params[0]\n return max([pz.zmax for pz in pzs])" ]
[ "0.8347883", "0.74840814", "0.71864057", "0.704579", "0.7015667", "0.69740826", "0.6961083", "0.69556326", "0.69554335", "0.69472003", "0.6935056", "0.6914028", "0.68585455", "0.67920977", "0.6785096", "0.6783946", "0.67707163", "0.67621744", "0.6759031", "0.6694084", "0.6694084", "0.6694084", "0.6694084", "0.6686113", "0.6685527", "0.6676079", "0.6675955", "0.6654236", "0.6639494", "0.66329527" ]
0.89036936
0
Return the time of the maximum flux
def max_time(self): return self.time[np.argmax(self.flux)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peak_time(self):\n return np.array([self.wftime[ch][self.waveform[ch].argmax()] for ch in range(self.nchannels)])", "def max(self):\n\n return time_stat(self, stat=\"max\")", "def max_flux(self):\n return np.max(self.flux)", "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime", "def max_time(self) -> float:\r\n if(len(self.operations_by_name) == 0):\r\n return -1\r\n return max(map(lambda x: x[\"time_step\"], self.operations_by_name.values()))", "def max_intensity(self, time):\n ti = np.where(time == self.times)[0][0]\n return self.timesteps[ti].max()", "def _get_max_t(self):\n\n return max([\n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])", "def max_flux(frame):\n return np.max(frame.fluxes[frame.radii <= max_extent_px])", "def max_time(self):\n return self._max_time", "def max_time(self):\n return self._ll_tree_sequence.get_max_time()", "def _get_max_t(self):\n \"\"\"\n if hasattr(self,'k_of_t'):\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n self.k_of_t[-1][0],\n ])\n else:\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])\n \"\"\"\n return self.t_max", "def getEvolutionMax(self):\n \n return [self.getMaximumAtGivenTime(timeIndex) for timeIndex in range(self.numberOfTimes - 1)]", "def computeMaxTime(ham: Dict[str, Any]) -> Tuple[float, float]:\n # Find the longest time\n maxNs = 0\n for key in ham[\"control\"]:\n ctrls = ham[\"control\"][key]\n for waveform in ctrls[\"waveforms\"]:\n finalNs = waveform[\"insert_ns\"] + waveform[\"duration_ns\"]\n if maxNs < finalNs:\n maxNs = finalNs\n maxDt = floor(maxNs / ham[\"circuit\"][\"dt\"])\n\n ham[\"circuit\"][\"max_time_dt\"] = maxDt\n ham[\"circuit\"][\"max_time_ns\"] = maxNs\n\n return maxNs, maxDt", "def _psp_max_time(rise, decay, rise_power):\n return rise * np.log(1 + (decay * rise_power / rise))", "def getMaxSimTime(self):\n return self.max_simsecs_value", "def maxtime(conn):\n c = conn.cursor()\n r = c.execute(\"SELECT max(time) as max_time FROM event WHERE bin_id not null\").fetchall()\n last_time = r[0]['max_time']\n return last_time", "def endTime(self) -> float:\n try: return self.times[-1]\n except IndexError: return 0.0", "def get_tmax(data):\n return data[np.argmax(data[:, 1])][0]", "def max_time(self) -> str:\n return self._max_time", "def wave_get_max_micros():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSM, 2, 0))", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def lambda_max(self):\n return const.b_wien / self.temperature", "def get_current_simulated_time(self):\n\n query = \"SELECT MAX(time) FROM patient_signal_values\"\n\n return self.mysql_obj.fetch_value(query)", "def max(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n return profData.Tmax()\n return \"\"", "def get_max_temp(self):\n self.max_temp = self.domain[1] * 2", "def max_temp(self):\n return 
self.atag.dhw_max_temp", "def max_temp(self):\n return self._max_temp", "def lmax(self):\n cond = (self.transmit / self.transmit.max()) > 1./100\n return max(self.wavelength[cond])", "def last_fmeasure(self):\n return self.get_fvalue(self.last_position())", "def max(self):\n return numpy.ma.max(self.data)" ]
[ "0.75761044", "0.75565094", "0.75416726", "0.75097215", "0.73423856", "0.71438485", "0.7086532", "0.7000203", "0.69391656", "0.68668413", "0.67839086", "0.67351854", "0.66913486", "0.6666073", "0.66316026", "0.6575393", "0.6508536", "0.64425504", "0.64256316", "0.6416781", "0.6398545", "0.63739777", "0.635436", "0.634965", "0.63469917", "0.6339746", "0.6293812", "0.6246736", "0.6246054", "0.6245138" ]
0.9051581
0
Return the reference time, defaults to the mid time
def reference_time(self): if hasattr(self, '_reference_time') is False: self._reference_time = self.midtime return self._reference_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ref_time(self):\n from datetime import datetime, timedelta\n\n ref_time = datetime(2010, 1, 1, 0, 0, 0)\n ref_time += timedelta(seconds=int(self.fid['/PRODUCT/time'][0]))\n return ref_time", "def ref_time(self) -> float:\n return ntp_to_system_time(self.ref_timestamp)", "def timing_reference(self):\n return self._timing_reference", "def current_time(cls) -> float:", "def time(self):\n try:\n if self.single_date:\n return self.stime\n else:\n return self.stime + (self.etime - self.stime) / 2\n except TypeError:\n return None", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def gettime(self):\n return self.t", "def get_time(self):\n\t\treturn time.time()", "def get_current_time(self):\n return self.time", "def _get_timebase(self):\n return clock()", "def _get_timebase(self):\n return clock()", "def get_time(self):\n return self.time", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def get_time(self):\n return self._current_time_sec", "def getLastRefreshedTime(self):\n\t\tmonths = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n\t\tmonth = months[self.lastRefreshed.tm_mon - 1][:3]\n\t\t\n\t\thours = str(self.lastRefreshed.tm_hour)\n\t\tif self.lastRefreshed.tm_hour < 10:\n\t\t\thours = \"0\" + hours\n\t\t\n\t\tminutes = str(self.lastRefreshed.tm_min)\n\t\tif self.lastRefreshed.tm_min < 10:\n\t\t\tminutes = \"0\" + minutes\n\t\t\t\n\t\ttime = hours + \":\" + minutes + \", \" + month + \" \" + str(self.lastRefreshed.tm_mday)\n\t\treturn time", "def ref_now():\n return as_datetime(datetime.datetime.now(), REF_TZ)", "def now(self):\n return self._startTime + self.timeToOffset(self.currentTime, self._timeScale)", "def get_time(self):\n clock = self.pipeline.get_clock()\n tm = clock.get_internal_time()\n return tm / 1.e9", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def initialTime(self):\n return self.params['t0']", "def get_time(self) -> float:\n raise NotImplementedError()", "def _get_half_time(self):\n return self.__half_time", "def get_time(self):\n return self.__time", "def get_time(self):\n return self.get_timed() / 10.0", "def get_time():\n return datetime.datetime.now()", "def __get_current_time(self) -> datetime:\n #return datetime.strptime(\"11:30\", '%H:%M')\n return datetime.now()", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time" ]
[ "0.7942926", "0.74960715", "0.7024686", "0.67897993", "0.66483927", "0.65842915", "0.65842915", "0.6584283", "0.6582991", "0.65362465", "0.65336937", "0.65336937", "0.65284216", "0.6519982", "0.65138245", "0.65086", "0.64379597", "0.6436091", "0.64268744", "0.6416861", "0.63623154", "0.6361519", "0.63608176", "0.6350451", "0.6341713", "0.63406026", "0.63367176", "0.63351977", "0.63351977", "0.63351977" ]
0.8897539
0
Naive estimate of the pulse time. Uses the mean of flux above a fraction f of the maximum flux
def estimate_pulse_time(self, f=0.75): idxs = np.abs(self.flux) > f * self.max_flux return np.mean(self.time[idxs])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(measControl).build(schema)\n \n # make and measure a series of exposures containing just one star, approximately centered\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(100, 101))\n kernelWidth = 35\n var = 100\n fwhm = 3.0\n sigma = fwhm/FwhmPerSigma\n convolutionControl = afwMath.ConvolutionControl()\n psf = measAlg.SingleGaussianPsf(kernelWidth, kernelWidth, sigma)\n psfKernel = psf.getLocalKernel()\n psfImage = psf.computeKernelImage()\n sumPsfSq = numpy.sum(psfImage.getArray()**2)\n psfSqArr = psfImage.getArray()**2\n for flux in (1000, 10000):\n ctrInd = afwGeom.Point2I(50, 51)\n ctrPos = afwGeom.Point2D(ctrInd)\n\n kernelBBox = psfImage.getBBox(afwImage.PARENT)\n kernelBBox.shift(afwGeom.Extent2I(ctrInd))\n\n # compute predicted flux error\n unshMImage = makeFakeImage(bbox, [ctrPos], [flux], fwhm, var)\n\n # filter image by PSF\n unshFiltMImage = afwImage.MaskedImageF(unshMImage.getBBox(afwImage.PARENT))\n afwMath.convolve(unshFiltMImage, unshMImage, psfKernel, convolutionControl)\n \n # compute predicted flux = value of image at peak / sum(PSF^2)\n # this is a sanity check of the algorithm, as much as anything\n predFlux = unshFiltMImage.getImage().get(ctrInd[0], ctrInd[1]) / sumPsfSq\n self.assertLess(abs(flux - predFlux), flux * 0.01)\n \n # compute predicted flux error based on filtered pixels\n # = sqrt(value of filtered variance at peak / sum(PSF^2)^2)\n predFluxErr = math.sqrt(unshFiltMImage.getVariance().get(ctrInd[0], ctrInd[1])) / sumPsfSq\n\n # compute predicted flux error based on unfiltered pixels\n # = sqrt(sum(unfiltered variance * PSF^2)) / sum(PSF^2)\n # and compare to that derived from filtered pixels;\n # again, this is a test of the algorithm\n varView = afwImage.ImageF(unshMImage.getVariance(), kernelBBox)\n varArr = varView.getArray()\n unfiltPredFluxErr = math.sqrt(numpy.sum(varArr*psfSqArr)) / sumPsfSq\n self.assertLess(abs(unfiltPredFluxErr - predFluxErr), predFluxErr * 0.01)\n \n for fracOffset in (afwGeom.Extent2D(0, 0), afwGeom.Extent2D(0.2, -0.3)):\n adjCenter = ctrPos + fracOffset\n if fracOffset == (0, 0):\n maskedImage = unshMImage\n filteredImage = unshFiltMImage\n else:\n maskedImage = makeFakeImage(bbox, [adjCenter], [flux], fwhm, var)\n # filter image by PSF\n filteredImage = afwImage.MaskedImageF(maskedImage.getBBox(afwImage.PARENT))\n afwMath.convolve(filteredImage, maskedImage, psfKernel, convolutionControl)\n\n exposure = afwImage.makeExposure(filteredImage)\n exposure.setPsf(psf)\n \n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*adjCenter))\n measFlux = source.get(measControl.name)\n measFluxErr = source.get(measControl.name + \".err\")\n self.assertFalse(source.get(measControl.name + \".flags\"))\n self.assertLess(abs(measFlux - flux), flux * 0.003)\n \n self.assertLess(abs(measFluxErr - predFluxErr), predFluxErr * 0.2)\n\n # try nearby points and verify that the flux is smaller;\n # this checks that the sub-pixel shift is performed in the correct direction\n for dx in (-0.2, 0, 0.2):\n for dy in (-0.2, 0, 0.2):\n if dx == dy == 0:\n continue\n offsetCtr = afwGeom.Point2D(adjCenter[0] + dx, adjCenter[1] + dy)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, offsetCtr)\n offsetFlux = 
source.get(measControl.name)\n self.assertLess(offsetFlux, measFlux)\n \n # source so near edge of image that PSF does not overlap exposure should result in failure\n \n for edgePos in (\n (1, 50),\n (50, 1),\n (50, bbox.getHeight() - 1),\n (bbox.getWidth() - 1, 50),\n ):\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*edgePos))\n self.assertTrue(source.get(measControl.name + \".flags\"))\n \n # no PSF should result in failure: flags set\n noPsfExposure = afwImage.ExposureF(filteredImage)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, noPsfExposure, afwGeom.Point2D(*adjCenter))\n self.assertTrue(source.get(measControl.name + \".flags\"))", "def max_time(self):\n return self.time[np.argmax(self.flux)]", "def water_evapotranspiration_flux(evap):\n return evap * (-1)", "def mtof(p):\n return 440.0 * 2 ** ((p - 69) / 12.0)", "def range_flux(self):\n return self.max_flux - self.min_flux", "def mean_wave_period(F, f, df):\n return np.sum(F * df) / np.sum(F * f * df)", "def calc_peakt(self, trial_dur):\n if trial_dur <= 11.0:\n peakt = 0.5375*trial_dur + 6.09625\n else:\n peakt = 11.75\n return peakt", "def ALPflux(self, EMeV, t_sec, g11):\n na_dedt = self._alp(EMeV=EMeV, ts = t_sec, g10 = g11 * 0.1) # alp spectrum per energy and time\n return na_dedt * 1.e52", "def estimate_bpm(D):\n if len(D) < 2*ignore:\n return 0\n else:\n return 1/np.mean(np.diff(D))*60", "def FAP_threshold(sig, Ni):\n # F := false alarm probability\n # sig := 1 - F\n return - np.log(1 - sig**(1/Ni))", "def peak_uncertainty(t, S, P):\n t = t.jyear\n N = S.size\n T = t.max() - t.min()\n amp, phase, offset = find_sine_params(t, S - S.mean(), P)\n sine = sinefunc(t, P, amp, phase, offset)\n residual = (S - S.mean()) - sine\n sigma = residual.std()\n #print \"P=%0.3f N=%i T=%0.1f amp=%0.3g sigma=%0.3g\" % (P, N, T, amp, sigma)\n return 3. * sigma * P**2 / (4. 
* T * amp * np.sqrt(N))", "def get_flux_density(self):\n if self.no_flux is False:\n return self.snu_at_1GHz\n else:\n return -1", "def surface_runoff_flux(runoff, drain):\n return runoff - drain", "def max_flux(self):\n return np.max(self.flux)", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))", "def find_alert_time(self) -> None:\n \n # Also not clear from the paper how to doe this,\n # use the first 10 data points in the light curve to determine the magnitude\n # baseline\n\n mean_mag = np.mean(self.mags[:10])\n std_mag = np.std(self.mags[:10])\n\n num_above = 0 \n i = 9\n\n while num_above < 3 and i < len(self.times)-1:\n \n i += 1 \n\n if self.mags[i] < mean_mag - std_mag:\n num_above += 1\n else:\n num_above = 0.0\n\n if len(self.times) - 1 == i:\n print(\"Give me more training data, not alerted yet, this is probably going to fail\")\n \n return self.times[i-1]", "def F0(t):\n if (t < 1e-6):\n return 1.0 - t / 3.0\n else:\n return 0.5 * (np.pi / t) ** 0.5 * sp.erf(t ** 0.5)", "def max_flux(frame):\n return np.max(frame.fluxes[frame.radii <= max_extent_px])", "def time_function(self, t):\n\n\tif type(t) == float:\n\t if t > self.pulse_duration:\n\t\tfield_strength = 0.0\n\t else:\n\t\tfield_strength = (self.amplitude * \n\t\t sin(pi * t / self.pulse_duration)**2 * \n\t\t cos(self.omega * t))\n\telse:\n\t field_strength = zeros(shape(t))\n\t for i, time in enumerate(t):\n\t\tif time > self.pulse_duration:\n\t\t temp_field_strength = 0.0\n\t\telse:\n\t\t temp_field_strength = (self.amplitude * \n\t\t\tsin(pi * time / self.pulse_duration)**2 * \n\t\t\tcos(self.omega * time))\n\n\t\tfield_strength[i] = temp_field_strength\n\t\n\treturn field_strength", "def totalInfilHorton1time(f0, fc, k, t):\n numerator = (f0 - fc)*(1 - np.exp(-k*t))\n Ft = (fc*t) + (numerator/k)\n return Ft", "def ftom(f):\n return 69 + 12 * log(f / 440.0, 2)", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))", "def test_flux(equation):\n u = .5\n eps = 1e-5\n expected = (equation.flux(u+eps) - equation.flux(u))/eps\n computed = equation.flux_prime(u)\n npt.assert_allclose(computed, expected, rtol=1e-4)", "def _psp_max_time(rise, decay, rise_power):\n return rise * np.log(1 + (decay * rise_power / rise))", "def compute_fade(f):\n\n return 6 * f**5 - 15 * f**4 + 10 * f**3", "def rectpulse(L, fsT, normalize = True):\n u = np.ones(L * fsT, dtype='float')\n\n # Normalize so that the integral of the pulse is 1/2 max\n if normalize:\n u = u / (2.0 * np.sum(u))\n \n return u", "def excitationPulse(self, time, power):\n t = time * ns + self.step # Should center at one step before 0\n if self.step <= 200 * ps: # resolution warrants modelling the pulse\n width = 200.0 * ps # self.step\n\n if t < width * 10: # Only evaulate when the value is significant\n amp = power / (width * sqrt_2pi) # normalized amplitude\n value = amp * np.exp(-1.0 * (t) * (t) / (2 * width * 
width))\n value = value\n else:\n value = 0.0\n else: # impulsive limit, just dump all the excitons in at t=0\n # if time >= 0 - self.step/2 and time < 0 + self.step/2:\n if t > -0.5 * self.step and t <= 0.5 * self.step:\n value = power / self.step\n else:\n value = 0.0\n return (value*self.step)", "def FMScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 1\n elif x <= d[p][0.4]:\n return 2\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 4\n else:\n return 5", "def FMScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 1\n elif x <= d[p][0.4]:\n return 2\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 4\n else:\n return 5", "def precondition(amp):\n n = len(amp)\n mean = np.mean(amp[:n/5])\n return -(amp-mean)" ]
[ "0.6198238", "0.6178355", "0.61166596", "0.610102", "0.6081345", "0.60715514", "0.5992787", "0.58948153", "0.5877116", "0.58590066", "0.5851902", "0.5837642", "0.583406", "0.58227193", "0.5817751", "0.5800002", "0.57959425", "0.5790262", "0.57887185", "0.5788051", "0.57787895", "0.57635665", "0.57392144", "0.57379556", "0.5729462", "0.56910235", "0.56862974", "0.5685893", "0.5685893", "0.5682866" ]
0.81107914
0
Read in the time and flux from a csv. The filename must point to a comma-separated file with at least two columns, "time" and "flux". Optionally, an additional "pulse_number" column can exist; if the pulse_number is specified, only data matching the requested pulse number will be loaded.
def from_csv(cls, filename, pulse_number=None): df = pd.read_csv(filename) return cls._sort_and_filter_dataframe(df, pulse_number)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv(name):\n\n DATA = []\n for row in csv.reader(open(name), delimiter=','):\n DATA.append((parse_time(row[0]), float(row[1])))\n\n return DATA", "def readData(filename,timeDelay=0.0, ampMult=1.0):\n data = []\n with open(filename,'r') as f:\n for x in range(4):\n f.readline()\n data = pd.read_csv(f) \n data.Ampl = precondition(data.Ampl)*ampMult # convert amplitudes, possibly.\n data.Time = data.Time*1.0e6 - timeDelay # convert to microseconds, offset by delay in signals.\n return data", "def read_load_data_from_csv(csv_path):\n # Load the original DataFrame, use easier-to-read column names, and drop unnecessary column\n original_df = pd.read_csv(csv_path).rename(columns={\"OperDay\" : \"Date\"}).drop([\"TOTAL\", \"DSTFlag\"],axis=1)\n\n original_df.name = csv_path.split(\"_\")[1]\n\n # Combine the originally separate date and hour columns into a single DateTime column\n return combine_date_and_hour_columns(original_df)", "def dataset_from_csv(self, filename, time_column='point_in_time'):\n return pd.from_csv(filename, parse_dates=[time_column])", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def load_data_from_file(filename):\r\n time = []\r\n position = []\r\n with open(filename, 'r') as original:\r\n time_position = list(csv.reader(original)) # list()\r\n for row in range(1, len(time_position)):\r\n time.append(float(time_position[row][0]))\r\n position.append(float(time_position[row][1]))\r\n\r\n return time, position", "def load_data(self, dropna=False):\r\n # Load data, delete Ml index, get number of channels, add\r\n df = pd.read_csv(self.file_path, header=None, index_col=0, dtype='float64')\r\n\r\n cols = df.shape[1]\r\n if cols < 2:\r\n raise ValueError(f'{self} wrong file type.')\r\n\r\n df.columns = ['t'] + [f\"c{i}\" for i in range(1, cols)]\r\n df.index = df.index.astype(int)\r\n df.index.name = 'r'\r\n\r\n if dropna:\r\n df.dropna(axis=1, how='all', inplace=True)\r\n\r\n self.set_data(df)", "def spectre_csv(f):\n \n skip = 0\n while True:\n try: \n wav, flux = np.loadtxt(f, delimiter = ',',\n skiprows = skip, unpack = True)\n \n except ValueError:\n # Si les première lignes ont un en-tête\n skip += 1\n \n else:\n break\n \n return wav,flux", "def read_pulse_acc(filename, multi_header=True):\n\n # TODO: Add Azure support\n num_headers = 20\n header_row = 18\n timestamp_row = 20\n\n with open(filename, \"r\") as f:\n accreader = csv.reader(f, delimiter=\" \")\n\n # Skip file info headers but extract header row and timestamp row data\n for i in range(num_headers):\n # Read columns header\n if i == header_row - 1:\n header = next(accreader)\n # Read the start timestamp\n elif i == timestamp_row - 1:\n ts_start = next(accreader)\n else:\n next(accreader)\n\n # Read body - drop blanks\n data = [[x for x in line if x != \"\"] for line in accreader]\n\n # Convert column names list so that split by \":\" not \" \"\n header = \" \".join(header).split(\":\")\n\n # Drop \"%Data,\" from the first column\n header[0] = header[0].split(\",\")[1]\n\n # Extract and convert start timestamp to datetime\n ts_start = [int(i) for i in ts_start[1:]]\n dt_start = datetime(\n ts_start[5], # year\n ts_start[4], # month\n ts_start[3], # day\n ts_start[2], # hour\n ts_start[1], # minute\n ts_start[0], # second\n )\n\n # Create dataframe and timestamps using start timestamp marker and time steps column\n df = pd.DataFrame(data, dtype=\"float\")\n ts = df.iloc[:, 0].values\n timestamps = [dt_start + timedelta(seconds=t) for t in ts]\n\n # For raw 
data module\n if multi_header is True:\n # Create multi-index header of channel names and units and time steps index\n channels = [col.split(\"(\")[0].strip() for col in header]\n units = [col.split(\"(\")[1][:-1] for col in header]\n header = list(zip(channels, units))\n header.insert(0, (\"Timestamp\", \"\"))\n header = pd.MultiIndex.from_tuples(header, names=[\"channels\", \"units\"])\n df = df.set_index(df.columns[0])\n df.index.name = \"Time (s)\"\n df.insert(loc=0, column=\"Timestamp\", value=timestamps)\n # For screening module\n else:\n # Create single row header of only channel names (i.e. strip out the units)\n # Replace time steps column with timestamps and use range index\n header = [\"Timestamp\"] + [col.split(\"(\")[0].strip() for col in header]\n df.iloc[:, 0] = timestamps\n\n # Set desired header (single or multi-index)\n df.columns = header\n\n return df", "def read_MyCSV(symbol_path, file_name, data_time_difference_to_UTC, names, usecols):\n shifted_hr = config.BROKER_TIME_BETWEEN_UTC + data_time_difference_to_UTC\n full_path = os.path.join(symbol_path, file_name)\n df = pd.read_csv(full_path, header=None, names=names, usecols=usecols)\n df.set_index('time', inplace=True)\n df.index = pd.to_datetime(df.index).shift(shifted_hr, freq='H')\n return df", "def read_traffic_sensor_from_csv(path: str) -> pd.DataFrame:\n\n df = pd.read_csv(path)\n df[\"measuredTime\"] = pd.to_datetime(df[\"measuredTime\"])\n df.set_index(\"measuredTime\", inplace=True)\n return df", "def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file", "def read_csv(self, filepath, obs_vars = ['obs'], header = True):\n # determine if the type file is gzip\n filetype, encoding = mimetypes.guess_type(filepath)\n if encoding == 'gzip':\n self.data = pd.read_csv(filepath, compression='gzip')\n else:\n self.data = pd.read_csv(filepath)\n\n self.original_data = copy.deepcopy(self.data)\n if self.cutoff:\n self.data = self.data[:self.cutoff]\n \n self.data = self.data[obs_vars]\n self.N = self.data.shape[0]\n return True", "def loadCSV(input_file):", "def read_csv():", "def read_from_csv(path):\n if not os.path.exists(path):\n return None\n if not path.endswith('.csv'):\n return None\n\n with open(path, 'r') as file:\n data = pd.read_csv(file, header=0)\n\n return data", "def read_csv():\n csv_file = \"dow.csv\"\n\n # read the data from the csv file, parsing the Dates to make the x-axis, setting index_col to zero to remove it\n data_frame = pd.read_csv(csv_file, parse_dates=True, index_col=0)\n return data_frame", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def read_wx_data(wx_file, harbor_data):\n wx_data = pd.read_csv(wx_file) # a dataframe that holds the data from \"TempPressure.txt\"\n \n temp = 
list(wx_data[\"Time\"]) # a list of strings\n # Convert string time to float hours for easier plotting\n init_time = temp[0] # take first time which will be your time zero\n harbor_data[\"wx_times\"] = [] # list to hold the data\n for h_time in temp:\n delta_t = dt.strptime(h_time, '%H:%M:%S') - dt.strptime(init_time, '%H:%M:%S') # get delta time\n harbor_data[\"wx_times\"].append(float(delta_t.total_seconds()/3600)) # convert to hours\n\n harbor_data[\"wx_temperatures\"] = wx_data[\"Ch1:Deg F\"] # Places temperatures in harbor_data", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')", "def read_weather_data_from_csv(csv_path):\n\n # Read the original DataFrame and select the relevant columns\n original_df = pd.read_csv(csv_path)[[\"DateUTC\",\"TemperatureF\"]]\n\n # Round up the hour of each Date to the nearest whole hour\n original_df[\"Date\"] = original_df[\"DateUTC\"].apply(round_utc_hour_up)\n\n # Rename Temperature field to include city name\n city = csv_path.split(\"_\")[1].split(\"/\")[1]\n original_df[city + \"_TemperatureF\"] = original_df[\"TemperatureF\"]\n original_df = original_df.drop([\"TemperatureF\", \"DateUTC\"], axis=1)\n\n return original_df", "def spectrum_csv(f):\n\n skip = 0\n while True:\n try:\n wav, flux = np.loadtxt(f, delimiter=\",\", skiprows=skip, unpack=True)\n\n except ValueError:\n # If the first lines have a header\n skip += 1\n\n else:\n break\n\n return wav, flux", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' 
% self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)", "def load_columns(self, csv_data):\n column_date = []\n column_time = []\n column_hold = []\n column_outcome = []\n for row in dataframe_to_rows(csv_data, index=False):\n cell_date = row[18]\n cell_date = cell_date.split(': ')[1]\n cell_time = row[23]\n cell_hold = row[24]\n cell_outcome = row[25]\n column_date.append(cell_date)\n column_time.append(cell_time)\n column_hold.append(cell_hold)\n column_outcome.append(cell_outcome)\n return column_date, column_time, column_hold, column_outcome", "def read_trajectory(self, data_name):\r\n # read in CSV\r\n data = pd.read_csv(data_name)\r\n\r\n # pull out columns\r\n times = data[\"Time\"].to_numpy()\r\n dist = data[\"Distance\"].to_numpy()\r\n\r\n # set the minimum time, for adding back at the end\r\n self.mint = np.min(times)\r\n times = times-self.mint\r\n\r\n # find the ending time for cutoffs\r\n self.end_time = times[-1]\r\n\r\n # create spline\r\n self.trajectory = UnivariateSpline(times, dist)", "def read_recorded(self):\n reader=csv.reader(open(self._filename,\"rb\"),delimiter=',')\n self._headers = reader.next()\n x=list(reader)\n recorded_positions=np.array(x).astype('float')\n self.refine_pos(recorded_positions)\n self._joint_first = recorded_positions[0]\n self._joint_last = recorded_positions[-1]" ]
[ "0.64431447", "0.63257873", "0.6152354", "0.6150834", "0.61482006", "0.61410695", "0.61265177", "0.61201715", "0.6092382", "0.5975094", "0.5948267", "0.5919354", "0.5901084", "0.58961165", "0.58782846", "0.5832039", "0.5829291", "0.58229786", "0.5819973", "0.57776815", "0.5777328", "0.5762455", "0.5753525", "0.57468224", "0.57361263", "0.57134336", "0.56961846", "0.5689045", "0.568856", "0.5688316" ]
0.67390954
0
Read in the time and flux from a pandas h5 file. The filename must point to an h5 dataframe file. The dataframe should have at least two columns, "time" and "flux". Optionally, an additional "pulse_number" column can exist; if the pulse_number is specified, only data matching the requested pulse number will be loaded.
def from_h5(cls, filename, pulse_number=None): df = pd.read_hdf(filename) return cls._sort_and_filter_dataframe(df, pulse_number)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(file_path):\n with h5py.File(file_path) as f:\n # load meta info\n fs, channels, p_names, signals = _get_info(f)\n\n # load raw data\n data = [f['protocol{}/raw_data'.format(k + 1)][:] for k in range(len(p_names))]\n df = pd.DataFrame(np.concatenate(data), columns=channels)\n\n # load signals data\n signals_data = [f['protocol{}/signals_data'.format(k + 1)][:] for k in range(len(p_names))]\n df_signals = pd.DataFrame(np.concatenate(signals_data), columns=['signal_'+s for s in signals])\n df = pd.concat([df, df_signals], axis=1)\n\n # load timestamps\n if 'timestamp' in df:\n timestamp_data = [f['protocol{}/timestamp_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['timestamps'] = np.concatenate(timestamp_data)\n\n # events data\n events_data = [f['protocol{}/mark_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['events'] = np.concatenate(events_data)\n\n # set block names and numbers\n df['block_name'] = np.concatenate([[p]*len(d) for p, d in zip(p_names, data)])\n df['block_number'] = np.concatenate([[j + 1]*len(d) for j, d in enumerate(data)])\n return df, fs, channels, p_names", "def FromH5(self,h5File=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n\r\n if h5File == None:\r\n h5File=self.h5File\r\n\r\n #Check if h5File exists\r\n if not os.path.exists(h5File): \r\n logStrFinal=\"{0:s}{1:s}: Not Existing!\".format(logStr,h5File) \r\n raise XmError(logStrFinal) \r\n \r\n try:\r\n self.dataFrames={} \r\n with pd.HDFStore(h5File) as h5Store:\r\n h5Keys=sorted(h5Store.keys())\r\n for h5Key in h5Keys:\r\n match=re.search('(/)(\\w+$)',h5Key)\r\n key=match.group(2)\r\n logger.debug(\"{0:s}{1:s}: Reading h5Key {2:s} to tableName {3:s}.\".format(logStr,h5File,h5Key,key)) \r\n self.dataFrames[key]=h5Store[h5Key]\r\n \r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n \r\n finally:\r\n h5Store.close()\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def read_logger_hdf5(filename):\n\n with pd.HDFStore(filename, mode=\"r\") as store:\n datasets = store.keys()\n\n df = pd.read_hdf(filename, key=datasets[0], start=0, stop=36000)\n t = (df.index - df.index[0]).total_seconds().values.round(3)\n df = df.reset_index()\n df.index = t\n\n return df", "def read_lh5(in_file, key=None, cols=None, ilo=0, ihi=None):\n if \".lh5\" not in in_file:\n print(\"Error, unknown file:\", in_file)\n exit()\n \n # open the file in context manager to avoid weird crashes \n t_start = time.time()\n with h5py.File(os.path.expanduser(in_file)) as hf:\n \n header = get_lh5_header(f_lh5, verbose=False)\n\n # pick off first table by default, or let the user specify the name\n table = list(header.keys())[0] if key is None else key\n df_hdr = header[table] \n \n # this function reads the Table into memory\n df = read_table(table, hf, df_hdr, ilo, ihi)\n\n # t_elapsed = time.time() - t_start\n # print(\"elapsed: {t_elapsed:.4f} sec\")\n \n return df", "def read(filename: str) -> orm.Data:\n return from_bands_inspect(load(hdf5_file=filename))", "def _loadHDF5File(self, filename):\n matfile = h5py.File(filename)\n\n self.StokesI = np.transpose(matfile['StokesI'][:,:])\n self.StokesQ = np.transpose(matfile['StokesQ'][:,:])\n self.StokesU = np.transpose(matfile['StokesU'][:,:])\n self.StokesV = 
np.transpose(matfile['StokesV'][:,:])\n self.detectorPosition = matfile['detectorPosition'][:,0]\n self.detectorDirection = matfile['detectorDirection'][:,0]\n self.detectorVisang = matfile['detectorVisang'][0,0]\n\n try: self.wall = matfile['wall'][:,:]\n except KeyError: pass\n\n try: self.separatrix = matfile['separatrix'][:,:]\n except KeyError: pass", "def load_h5py_file(fname, offsets = [0, 0, 0]):\n # Load the data\n f = h5py.File(fname, 'r') # r for read only\n print(\"Available fields: \", list(f.keys())) # f is a dictionary. Let's look at the keys\n\n # Create variables from loaded dictionary\n neural_data = f['ripple_data'][:,0:32]\n emg_data = f['ripple_data'][:,32:]\n force_data = f['data'][0:6,:].transpose()\n fs = f['mySampleRate'][:]\n\n # Transform matrix for force data\n TF = [[1.117\t, -0.096747,\t 1.7516, 0.03441, -0.88072, 0.042127, -0.89026],\n [0.3134, 0.0041349, 0.0045219, -0.055942, 1.5273, 0.037719,-1.5227],\n [0.135\t, 1.4494, -0.061075, 1.6259, 0.083867, 1.5999, 0.0058155]]\n TF = np.array(TF)\n\n # Read force data\n force_data = np.concatenate((np.ones((len(force_data),1)), force_data), axis=1)\n force_data = force_data @ TF.transpose()\n\n # Make baseband zero\n force_data[:,0] = force_data[:,0] - offsets[0]\n force_data[:,1] = force_data[:,1] - offsets[1]\n force_data[:,2] = force_data[:,2] - offsets[2]\n\n # Use sent and received pulse signals to allign DAQ and RIPPLE data\n pulse_sent = f['data'][6,:].transpose()\n ps_ind, = np.nonzero(pulse_sent>1)\n ps_ind = ps_ind[0]\n\n pulse_received = f['ttl_data'][:,0]\n pr_ind, = np.nonzero(pulse_received>2000)\n pr_ind = pr_ind[0]\n\n p_diff = ps_ind - pr_ind\n\n # Align data\n if p_diff > 0:\n pulse_sent = np.concatenate((pulse_sent[p_diff:], np.zeros((p_diff,))), axis=0)\n trailing = np.mean(force_data[-int(fs*0.1):], axis=0) * np.ones((p_diff,1))\n force_data = np.concatenate((force_data[p_diff:,:], trailing))\n else:\n pulse_sent = np.concatenate((np.zeros((-p_diff,)), pulse_sent[:p_diff]), axis=0)\n leading = np.mean(force_data[:int(fs * 0.1)], axis=0) * np.ones((-p_diff, 1))\n force_data = np.concatenate((leading, force_data[:p_diff,:]))\n\n # Choose force channel for analysis\n force_data = force_data[:,1]\n force_data = -force_data # Invert the sign (increased as applied force increased)\n\n # Choose EMG data\n emg_data = emg_data[:,(5,15)]-emg_data[:,(23,25)]\n\n # Re-order EMG data so that 1. Dorsal 2. Biceps 3. Ventral 4. 
Triceps\n positions3 = (0,1)\n emg_data = emg_data[:,positions3]\n\n # Corresponding time vectors\n time = f['ripple_time'][:]\n return neural_data, emg_data, force_data, time, fs", "def load_h5(filename: str, **kwargs):\n return open_h5(filename, 'r', **kwargs)", "def from_file(cls, filename, pulse_number=None):\n if \"h5\" in filename:\n return cls.from_h5(filename, pulse_number)\n else:\n return cls.from_csv(filename, pulse_number)", "def load_dataset(path):\n if '.h5' in str(path):\n dataframe = pd.read_hdf(path)\n elif '.pkl' in str(path):\n dataframe = pd.read_pickle(path)\n else:\n print('Wrong file')\n sys.exit()\n\n # Make it multiindex\n dataframe['event'] = dataframe.index\n dataframe = dataframe.set_index(['sample_nr', 'event'])\n dataframe = dataframe.reset_index('event', drop=True)\n dataframe = dataframe.set_index(dataframe.groupby(level=0).cumcount().rename('event'), append=True)\n\n return dataframe", "def read_uvh5(\n self,\n filename,\n antenna_nums=None,\n antenna_names=None,\n ant_str=None,\n bls=None,\n frequencies=None,\n freq_chans=None,\n times=None,\n time_range=None,\n polarizations=None,\n blt_inds=None,\n keep_all_metadata=True,\n read_data=True,\n data_array_dtype=np.complex128,\n multidim_index=False,\n background_lsts=True,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n strict_uvw_antpos_check=False,\n ):\n if not os.path.exists(filename):\n raise IOError(filename + \" not found\")\n\n # open hdf5 file for reading\n with h5py.File(filename, \"r\") as f:\n # extract header information\n header = f[\"/Header\"]\n self._read_header(\n header,\n filename,\n run_check_acceptability=run_check_acceptability,\n background_lsts=background_lsts,\n )\n\n if not read_data:\n # don't read in the data. This means the object is incomplete,\n # but that may not matter for many purposes.\n return\n\n # Now read in the data\n dgrp = f[\"/Data\"]\n self._get_data(\n dgrp,\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n polarizations,\n blt_inds,\n data_array_dtype,\n keep_all_metadata,\n multidim_index,\n run_check,\n check_extra,\n run_check_acceptability,\n strict_uvw_antpos_check,\n )\n\n return", "def h5ToDf(filename):\n log.info(f\"Import data from: {filename}\")\n with h5py.File(filename, \"r\") as hf :\n d = {}\n for name in list(hf.keys()):\n d[name] = np.array(hf[name][:])\n df = pd.DataFrame(data=d)\n return(df)", "def h5ToDf(filename):\n log.info(f\"Import data from: {filename}\")\n with h5py.File(filename, \"r\") as hf :\n d = {}\n for name in list(hf.keys()):\n d[name] = np.array(hf[name][:])\n df = pd.DataFrame(data=d)\n return(df)", "def HDF5_to_dataframe(self, **kwds):\n # compile regular expression operator for extracting info from ICESat2 files\n rx = re.compile(r'(processed_)?(ATL\\d+)(-\\d{2})?_(\\d{4})(\\d{2})(\\d{2})'\n r'(\\d{2})(\\d{2})(\\d{2})_(\\d{4})(\\d{2})(\\d{2})_(\\d{3})_(\\d{2})(.*?).h5$')\n # split extension from HDF5 file\n # extract parameters from ICESat2 HDF5 file\n if isinstance(self.filename, str):\n # extract parameters from ICESat2 HDF5 file\n SUB,PRD,HEM,YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX = \\\n rx.findall(os.path.basename(self.filename)).pop()\n else:\n SUB,PRD,HEM,YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX = \\\n rx.findall(os.path.basename(self.filename.filename)).pop()\n\n # copy bare minimum variables from the HDF5 file to pandas data frame\n source = h5py.File(self.filename,mode='r')\n\n # find valid beam groups by testing for particular 
variables\n if (PRD == 'ATL06'):\n VARIABLE_PATH = ['land_ice_segments','segment_id']\n elif (PRD == 'ATL07'):\n VARIABLE_PATH = ['sea_ice_segments','height_segment_id']\n elif (PRD == 'ATL08'):\n VARIABLE_PATH = ['land_segments','segment_id_beg']\n elif (PRD == 'ATL10'):\n VARIABLE_PATH = ['freeboard_beam_segments','delta_time']\n elif (PRD == 'ATL12'):\n VARIABLE_PATH = ['ssh_segments','delta_time']\n # create list of valid beams within the HDF5 file\n beams = []\n for gtx in [k for k in source.keys() if bool(re.match(r'gt\\d[lr]',k))]:\n # check if subsetted beam contains data\n try:\n source['/'.join([gtx,*VARIABLE_PATH])]\n except KeyError:\n pass\n else:\n beams.append(gtx)\n\n # for each valid beam within the HDF5 file\n frames = []\n gt = dict(gt1l=10,gt1r=20,gt2l=30,gt2r=40,gt3l=50,gt3r=60)\n for gtx in sorted(beams):\n # set variable parameters to read for specific products\n if (PRD == 'ATL06'):\n # land ice height\n var = source[gtx]['land_ice_segments']\n valid, = np.nonzero(var['h_li'][:] != var['h_li'].fillvalue)\n # variables for the output dataframe\n vnames = ['segment_id','delta_time','latitude','longitude',\n 'h_li','h_li_sigma','atl06_quality_summary',\n 'fit_statistics/dh_fit_dx',\n 'fit_statistics/dh_fit_dy',\n 'fit_statistics/dh_fit_dx_sigma',\n 'fit_statistics/n_fit_photons',\n 'fit_statistics/h_expected_rms',\n 'fit_statistics/h_robust_sprd',\n 'fit_statistics/w_surface_window_final']\n elif (PRD == 'ATL07'):\n # sea ice height\n var = source[gtx]['sea_ice_segments']\n valid, = np.nonzero(var['heights/height_segment_quality'][:] == 1)\n # variables for the output ascii file\n vnames = ['height_segment_id','seg_dist_x','delta_time',\n 'latitude','longitude',\n 'heights/height_segment_height',\n 'heights/height_segment_confidence',\n 'heights/height_segment_type',\n 'heights/height_segment_ssh_flag',\n 'heights/height_segment_w_gaussian',\n 'stats/photon_rate','stats/cloud_flag_asr',\n 'geophysical/height_segment_lpe',\n 'geophysical/height_segment_mss',\n 'geophysical/height_segment_ocean',\n 'geophysical/height_segment_ib']\n elif (PRD == 'ATL08'):\n # land and vegetation height\n var = source[gtx]['land_segments']\n valid, = np.nonzero(var['terrain/h_te_best_fit'][:] !=\n var['terrain/h_te_best_fit'].fillvalue)\n # variables for the output dataframe\n vnames = ['segment_id_beg','segment_id_end','delta_time',\n 'latitude','longitude','brightness_flag','layer_flag',\n 'msw_flag','night_flag','terrain_flg','urban_flag',\n 'segment_landcover','segment_snowcover','segment_watermask',\n 'terrain/h_te_best_fit','terrain/h_te_uncertainty',\n 'terrain/terrain_slope','terrain/n_te_photons',\n 'canopy/h_canopy','canopy/h_canopy_uncertainty',\n 'canopy/canopy_flag','canopy/n_ca_photons']\n # create a dictionary of valid output segment values\n data = {}\n # convert data to numpy array for backwards HDF5 compatibility\n for v in vnames:\n values = np.copy(var[v][:])\n data[posixpath.basename(v)] = values[valid]\n # Generate Time Column\n delta_time = (data['delta_time']*1e9).astype('timedelta64[ns]')\n data['time'] = pandas.to_datetime(self.atlas_sdp_epoch+delta_time)\n # copy filename parameters\n data['rgt'] = np.array([int(TRK)]*len(valid))\n data['cycle'] = np.array([int(CYCL)]*len(valid))\n data['gt'] = np.array([gt[gtx]]*len(valid))\n # calculate global reference point\n if PRD in ('ATL06','ATL07','ATL08'):\n data['global_ref_pt'] = 6*1387*data[VARIABLE_PATH[-1]] + \\\n 6*(data['rgt']-1) + (data['gt']/10)\n # copy beam-level attributes\n attrs = 
['groundtrack_id','atlas_spot_number','atlas_beam_type',\n 'sc_orientation','atmosphere_profile','atlas_pce']\n for att_name in attrs:\n att_val=self.attributes_encoder(source[gtx].attrs[att_name])\n data[att_name] = [att_val]*len(valid)\n # pandas dataframe from compiled dictionary\n frames.append(pandas.DataFrame.from_dict(data))\n # return the concatenated pandas dataframe\n return pandas.concat(frames)", "def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data", "def get_frames_rep_hdf5(file_hdf5, time_hdf5, filename, time_begin_s, time_end_s):\n get_features = Features_Accessor(time_hdf5, file_hdf5).get_features_from_raw\n return get_features(dict({'file': [filename], 'onset': [time_begin_s], 'offset': [time_end_s]}))[1]", "def load_h5(fname, surfmap=True):\n filenames = glob.glob(fname)\n print(\"Files found: {}\".format(filenames))\n fin = h5py.File(filenames[0])\n meas = fin['measurement0'] # Wavefront data located in 'measurement0'\n opdsets = meas['genraw']\n wvl = opdsets.attrs['wavelength'][:]\n wvl = float(wvl[:-3])\n # Get the x pixel spacing\n try:\n iscale = float(opdsets.attrs['xpix'][:-3])\n except TypeError:\n iscale = 0.0\n print(\"No Calibration Dimensioning Found in H5 file\")\n # Return either surface map or fringe map\n if surfmap is True:\n data = np.asarray(opdsets['data'])\n data[data > 1e10] = np.nan # Eliminates \"bad\" data sets to NAN\n data *= wvl * mask_data(filenames[0])\n else:\n data = np.asarray(meas['reserve_interferogram']['frame4']['data'])\n return data, wvl, iscale", "def load_h5_file(file_path):\n # load\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = list(fr[a_group_key])\n # transform to appropriate numpy array \n data=data[0:]\n data = np.stack(data, axis=0)\n return data", "def load_h5_file(file_path):\n # load\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = list(fr[a_group_key])\n # transform to appropriate numpy array \n data=data[0:]\n data = np.stack(data, axis=0)\n return data", "def parse_hdf5(inp, close=True, **kwargs):\n import json\n import h5py\n # Path\n path = kwargs.pop('path', '/')\n # Open\n if isinstance(inp, basestring):\n hdf5 = h5py.File(inp, 'r')\n else:\n hdf5 = inp\n # Data\n data = hdf5[path+'data'][()]\n # Meta\n if 'meta' in hdf5[path].keys():\n meta = json.loads(hdf5[path+'meta'][()])\n # Headers\n for jj,heads in enumerate(meta['headers']):\n try:\n meta['headers'][jj] = fits.Header.fromstring(meta['headers'][jj])\n except TypeError: # dict\n if not isinstance(meta['headers'][jj], dict):\n raise IOError(\"Bad meta type\")\n else:\n meta = None\n # Units\n units = json.loads(hdf5[path+'units'][()])\n for key,item in units.items():\n if item == 'dimensionless_unit':\n units[key] = u.dimensionless_unscaled\n else:\n units[key] = getattr(u, item)\n # Other arrays\n try:\n sig = data['sig']\n except (NameError, IndexError):\n sig = None\n try:\n co = data['co']\n except (NameError, IndexError):\n co = None\n # Finish\n if close:\n hdf5.close()\n return XSpectrum1D(data['wave'], data['flux'], sig=sig, co=co,\n meta=meta, units=units, **kwargs)", "def read_hdf5(path_to_file):\n\n print(\"\\nReading HDF5 file: \", path_to_file)\n file = h5py.File(path_to_file, 'r')\n\n # List the groups\n groups = list(file.keys())\n print(\"Groups available: \", groups)\n\n # Read Zemax Metadata\n zemax_metadata = {}\n print(\"\\nZemax 
Metadata:\")\n for key in file['Zemax Metadata'].attrs.keys():\n print('{} : {}'.format(key, file['Zemax Metadata'].attrs[key]))\n zemax_metadata[key] = file['Zemax Metadata'].attrs[key]\n\n # Read the analysis groups\n for group_name in groups:\n if group_name != 'Zemax Metadata':\n analysis_group = file[group_name]\n print('\\nAnalysis: ', group_name)\n # For each Analysis Group we loop over subgroups\n for subgroup_key in analysis_group.keys():\n subgroup = analysis_group[subgroup_key]\n print('Subgroup #', subgroup_key)\n # List the metadata of the subgroup\n for att_key in subgroup.attrs.keys():\n print(' {} : {}'.format(att_key, subgroup.attrs[att_key]))\n\n file.close()\n\n return zemax_metadata", "def _readHDF5(self):\n\n h5 = h5py.File(self.pointInputFile, 'r')\n self.coords = h5['geometry/vertices'][:]\n self.stations = h5['stations'][:]\n self.dispRaw = h5['vertex_fields/displacement'][self.timeStep,:,:]\n h5.close()\n\n self.numStations = self.coords.shape[0]\n\n return", "def read_hdf5(file_path):\n if not os.path.exists(file_path):\n logging.fatal(\"Cannot read feature file {}.\".format(file_path))\n exit()\n hdf5_file = h5py.File(file_path, 'r')\n data = np.array(hdf5_file['data'])\n hdf5_file.close()\n\n return data", "def load_data(infile, nstep): \n \n f = h5py.File(infile, 'r')\n \n edges_grp = f['edges']\n xedges = np.asarray(edges_grp['x'][nstep], dtype=float)\n yedges = np.asarray(edges_grp['y'][nstep], dtype=float)\n\n time = np.asarray(f['time'][nstep])\n\n tables_grp = f['tables']\n rho_hist = np.asarray(tables_grp['rho'][nstep], dtype=float)\n vx_hist = np.asarray(tables_grp['vx'][nstep], dtype=float)\n vy_hist = np.asarray(tables_grp['vy'][nstep], dtype=float)\n vorticity = np.asarray(tables_grp['vorticity'][nstep], dtype=float) \n \n box_grp = f['box']\n lx = box_grp['x'][...]\n ly = box_grp['y'][...]\n \n #nsteps = f['nsteps'][...]\n f.close()\n\n return lx, ly, time, xedges, yedges, rho_hist, vx_hist, vy_hist, vorticity", "def load_measurement(file_name, mask_df=None, shift=None):\n # Load ground-truth data\n df = pd.read_hdf(file_name, 'data')\n # Convert to dataframe if necessary\n if not isinstance(df, pd.DataFrame):\n df = df.to_frame()\n # Replace multiindex with start_date, lat, lon columns if necessary\n if isinstance(df.index, pd.core.index.MultiIndex):\n df.reset_index(inplace=True)\n if mask_df is not None:\n # Restrict output to requested lat, lon pairs\n df = subsetmask(df, mask_df)\n # Return dataframe with desired shift\n return shift_df(df, shift=shift, date_col='start_date', groupby_cols=['lat', 'lon'])", "def read_hdf5(filename, namelist=None, **kwargs):\n\n print('Reading %s...'%filename)\n\n fid = h5py.File(filename, mode='r')\n \n data = read_hdf5_tree(fid, namelist, **kwargs)\n\n fid.close()\n \n print('Finished reading %s.'%filename)\n return data", "def read_hdf5(self, file_name,\r\n projections_start=None,\r\n projections_end=None,\r\n projections_step=None,\r\n slices_start=None,\r\n slices_end=None,\r\n slices_step=None,\r\n pixels_start=None,\r\n pixels_end=None,\r\n pixels_step=None,\r\n white_start=None,\r\n white_end=None,\r\n dark_start=None,\r\n dark_end=None,\r\n dtype='float32'):\r\n print \"Reading data...\"\r\n self.file_name = file_name\r\n\r\n # Initialize f to null.\r\n f = None\r\n\r\n # Get the file_name in lower case.\r\n lFn = file_name.lower()\r\n\r\n # Split the string with the delimeter '.'\r\n end = lFn.split('.')\r\n\r\n # If the string has an extension.\r\n if len(end) > 1:\r\n # Check.\r\n if end[len(end) - 1] 
== 'h5' or end[len(end) - 1] == 'hdf':\r\n f = Hdf5()\r\n\r\n # If f != None the call read on it.\r\n if not f == None:\r\n # Read data from exchange group.\r\n self.data = f.read(file_name,\r\n array_name='exchange/data',\r\n x_start=projections_start,\r\n x_end=projections_end,\r\n x_step=projections_step,\r\n y_start=slices_start,\r\n y_end=slices_end,\r\n y_step=slices_step,\r\n z_start=pixels_start,\r\n z_end=pixels_end,\r\n z_step=pixels_step).astype(dtype)\r\n\r\n # Read white field data from exchange group.\r\n print white_start, white_end, slices_start, slices_end\r\n self.white = f.read(file_name,\r\n array_name='exchange/data_white',\r\n x_start=white_start,\r\n x_end=white_end,\r\n y_start=slices_start,\r\n y_end=slices_end,\r\n y_step=slices_step,\r\n z_start=pixels_start,\r\n z_end=pixels_end,\r\n z_step=pixels_step).astype(dtype)\r\n\r\n # Read dark field data from exchange group.\r\n self.dark = f.read(file_name,\r\n array_name='exchange/data_dark',\r\n x_start=dark_start,\r\n x_end=dark_end,\r\n y_start=slices_start,\r\n y_end=slices_end,\r\n y_step=slices_step,\r\n z_start=pixels_start,\r\n z_end=pixels_end,\r\n z_step=pixels_step).astype(dtype)\r\n\r\n # Assign the rotation center.\r\n self.center = self.data.shape[2] / 2\r\n else:\r\n print 'Unsupported file.'", "def read_h5file(self, fname, datasetname):\n with h5py.File(fname, 'r') as f:\n atom_pos = f.get(datasetname + '/r').value # atom position -> N x 3 array\n ion_list = f.get(\n datasetname + '/xyz').value # length = N, contain atom type id for each atom\n self.atom_pos = atom_pos[np.argsort(ion_list)]\n _, idx = np.unique(np.sort(ion_list), return_index=True)\n self.split_idx = np.append(idx, [len(ion_list)])\n\n # get atom factor table, sorted by atom type id\n atom_type = f.get(\n datasetname + '/T').value # atom type array, each type is represented by an integer\n self.num_atom_types = len(atom_type)\n ff_table = f.get(datasetname + '/ff').value\n self.ff_table = ff_table[np.argsort(atom_type)]\n\n self.q_sample = f.get(datasetname + '/halfQ').value\n self.num_q_samples = len(self.q_sample)\n self.compton_q_sample = f.get(datasetname + '/Sq_halfQ').value\n self.num_compton_q_samples = len(self.compton_q_sample)\n self.sBound = f.get(datasetname + '/Sq_bound').value\n self.nFree = f.get(datasetname + '/Sq_free').value", "def load_measurement(file_name, mask_df=None, shift=None):\r\n # Load ground-truth data\r\n df = pd.read_hdf(file_name, 'data')\r\n\r\n # Convert to dataframe if necessary\r\n if not isinstance(df, pd.DataFrame):\r\n df = df.to_frame()\r\n # Replace multiindex with start_date, lat, lon columns if necessary\r\n if isinstance(df.index, pd.MultiIndex):\r\n df.reset_index(inplace=True)\r\n if mask_df is not None:\r\n # Restrict output to requested lat, lon pairs\r\n df = subsetmask(df, mask_df)\r\n\r\n # Return dataframe with desired shift\r\n return shift_df(df, shift=shift, date_col='start_date', groupby_cols=['lat', 'lon'])", "def openMCSH5File(filename, verbose=False):\n rf = h5py.File(filename, 'r')\n \n stream = rf.require_group('/Data/Recording_0/AnalogStream/Stream_0')\n data = np.array(stream.get('ChannelData'),dtype=np.int)\n timestamps = np.array(stream.get('ChannelDataTimeStamps'))\n info = np.array(stream.get('InfoChannel'))\n \n Unit = info['Unit'][0]\n Tick = info['Tick'][0]/1e6\n exponent = info['Exponent'][0]\n convFact = info['ConversionFactor'][0]\n \n nRecCh, nFrames = data.shape\n channel_ids = info['ChannelID']\n assert len(np.unique(channel_ids)) == len(channel_ids), 
'Duplicate MCS channel IDs found'\n electrodeLabels = info['Label']\n \n TimeVals = np.arange(timestamps[0][0],timestamps[0][2]+1,1)*Tick\n \n assert Unit==b'V', 'Unexpected units found, expected volts, found {}'.format(Unit.decode('UTF-8'))\n data_V = data*convFact.astype(float)*(10.0**(exponent))\n \n timestep_avg = np.mean(TimeVals[1:]-TimeVals[0:-1])\n timestep_std = np.std(TimeVals[1:]-TimeVals[0:-1])\n timestep_min = np.min(TimeVals[1:]-TimeVals[0:-1])\n timestep_max = np.min(TimeVals[1:]-TimeVals[0:-1])\n assert all(np.abs(np.array((timestep_min, timestep_max))-timestep_avg)/timestep_avg < 1e-6), 'Time steps vary by more than 1 ppm'\n samplingRate = 1./timestep_avg\n\n if verbose:\n print('# MCS H5 data format')\n print('#')\n print('# File: {}'.format(rf.filename))\n print('# File size: {:.2f} MB'.format(rf.id.get_filesize()/1024**2))\n print('#')\n for key in rf.attrs.keys():\n print('# {}: {}'.format(key,rf.attrs[key]))\n print('#')\n print('# Signal range: {:.2f} to {:.2f} µV'.format(np.amin(data_V)*1e6,np.amax(data_V)*1e6))\n print('# Number of channels: {}'.format(nRecCh))\n print('# Number of frames: {}'.format(nFrames))\n print('# Time step: {:.2f} µs ± {:.5f} % (range {} to {})'.format(timestep_avg*1e6, timestep_std/timestep_avg*100, timestep_min*1e6, timestep_max*1e6))\n print('# Sampling rate: {:.2f} Hz'.format(samplingRate))\n print('#')\n print('# MCSH5RecordingExtractor currently only reads /Data/Recording_0/AnalogStream/Stream_0')\n\n return (rf, nFrames, samplingRate, nRecCh, channel_ids, electrodeLabels, exponent, convFact)" ]
[ "0.682035", "0.6719041", "0.6556243", "0.6371989", "0.635916", "0.635621", "0.6349493", "0.6265173", "0.6226241", "0.61761767", "0.6165642", "0.6137056", "0.6137056", "0.60813636", "0.60271335", "0.60026705", "0.59815794", "0.59458834", "0.59458834", "0.591166", "0.5883735", "0.5876447", "0.5847416", "0.5829509", "0.5825623", "0.58101064", "0.5788034", "0.576465", "0.57562757", "0.5709598" ]
0.7634437
0
Send Grafana annotations to various endpoints
def main(annotate_uri, api_key, title, tags, description, start_time, end_time, debug): log_level = logging.INFO if debug: log_level = logging.DEBUG logging.basicConfig(format=' [%(levelname)s] %(message)s', level=log_level) try: if description is None: if not sys.stdin.isatty(): description = "".join([line for line in iter(sys.stdin.readline, '')]) else: description = "" this_annotation = Annotation(title, tags, description, start_time, end_time) result = this_annotation.send(annotate_uri, api_key) if result['event_data']: logging.debug(result['event_data']) if result['message']: logging.info(result['message']) except Exception as e: logging.exception(e) """ We could exit 1 here but we really don't want to cause a job to fail just because we couldn't send an event. """ sys.exit(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def annotation_request():\n resp = make_response([])\n return jsonify(resp)", "def annotate(api_key, text, ontologies=[], longest_only=False, expand_mappings=False, include=[]):\n annotations = []\n url = BIOPORTAL_API_BASE + '/annotator'\n\n headers = {\n 'content-type': \"application/json\",\n 'authorization': \"apikey token=\" + api_key\n }\n\n if len(text) > 0:\n payload = {'text': text,\n 'longest_only': longest_only,\n 'expand_mappings': expand_mappings}\n\n if len(ontologies) > 0:\n payload['ontologies'] = ','.join(ontologies)\n\n if len(include) > 0:\n payload['include'] = ','.join(include)\n\n response = requests.post(url, json=payload, headers=headers, verify=False)\n\n if response.status_code != 200:\n raise Exception('Problem when calling the Annotator: ' + response.text)\n\n\n\n # print(payload)\n # print(response.url)\n # print(response.status_code)\n # print(response.text)\n annotations = json.loads(response.text)\n\n return annotations", "def main():\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.DEBUG)\n\n usage = '%prog storage_service_url annotation'\n parser = optparse.OptionParser(usage=usage)\n\n args = parser.parse_args()[-1]\n\n if len(args) != 2:\n parser.error(\"Insufficient arguments\")\n\n url = args[0]\n annotation = args[1]\n submit_annotations(url, annotation)", "def add_annotations(self, annotations):\n for annotation in annotations:\n logging.info(\"Annotation received on: '%s'\" % annotation.communication.id)\n self.annotations.extend(annotations)\n return True", "def annotation(self, label, duration):\n self.send_trigger(self.new_trigger('annotation', label, duration))", "def Annotate(self, request, context):\n\n session_id = uuid.uuid4()\n mnemonic = encode(session_id)\n\n try:\n pid = os.getpid()\n self.logger.info(\"Current PID: \" + str(pid))\n payload = parse_payload(request.annotations, request.genes)\n response, check = check_genes(payload=payload)\n self.logger.warning(response)\n\n if check:\n response = start_annotation(session_id=session_id, mnemonic=mnemonic, payload=payload)\n if response:\n url = \"{MOZI_RESULT_URI}/?id={mnemonic}\".format(MOZI_RESULT_URI=MOZI_RESULT_URI, mnemonic=mnemonic)\n return annotation_pb2.AnnotationResponse(result=url)\n else:\n msg = \"an internal error occured. 
please try again\"\n context.set_details(msg)\n context.set_code(grpc.StatusCode.INTERNAL)\n return annotation_pb2.AnnotationResponse(result=msg)\n else:\n self.logger.warning(\"The following genes were not found in the atomspace %s\", response)\n context.set_details(response)\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n return annotation_pb2.AnnotationResponse(result=response)\n\n except Exception as ex:\n self.logger.exception(traceback.format_exc())\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n context.set_details(\"Error occurred in while trying to perform request: \" + ex.__str__())\n return annotation_pb2.AnnotationResponse(result=\"url\")", "def get_analysis_annotations():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/analysis/annotations')\n\n annotations = []\n context_path = 'ThreatGrid.AnalysisResults.Sample.Id.Annotations'\n ec = {context_path: []} # type: ignore\n ips = demisto.get(r.json(), 'data.items.network') # type: ignore\n if ips:\n for k in ips:\n annotation = {\n 'IP': k,\n 'IP.Asn': ips[k].get('asn'),\n 'IP.City': ips[k].get('city'),\n 'IP.Country': ips[k].get('country'),\n 'IP.Org': ips[k].get('org'),\n 'IP.Region': ips[k].get('region'),\n 'IP.Timestamp': ips[k].get('ts')\n }\n annotations.append(annotation)\n ec[context_path].append(annotation)\n\n demisto.results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': r.json(),\n 'EntryContext': ec,\n 'HumanReadable': tableToMarkdown('ThreatGrid - Analysis Annotations', annotations, [\n 'IP', 'IP.Asn', 'IP.City', 'IP.Country', 'IP.Org', 'IP.Region', 'IP.Timestamp'\n ])\n })", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def annotation_all_stats(request):\n\n id_report = request.GET.get('report',None)\n language = request.GET.get('language',None)\n\n json_dict = get_annotations_count(id_report,language)\n\n # print('annotations',json_dict)\n return JsonResponse(json_dict)", "def do_POST(self):\n if not self.path.endswith(\"/\"): self.path += \"/\"\n if self.path == \"/annotate/\":\n # Read message\n length = int(self.headers.get('content-length'))\n msg = self.rfile.read(length)\n\n # Do the annotation\n doc = Document()\n parseFromDelimitedString(doc, msg)\n self.annotator.annotate(doc)\n\n with io.BytesIO() as stream:\n writeToDelimitedString(doc, stream)\n msg = stream.getvalue()\n\n # write message\n self.send_response(HTTPStatus.OK)\n self.send_header(\"Content-Type\", \"application/x-protobuf\")\n self.send_header(\"Content-Length\", len(msg))\n self.end_headers()\n self.wfile.write(msg)\n\n else:\n self.send_response(HTTPStatus.BAD_REQUEST)\n self.end_headers()", "def reannotate_trials_api():\n reannotate_trials()\n resp = Response(response=json.dumps({\"success\": True}),\n status=200,\n mimetype=\"application/json\")\n return resp", "def add_annotations(\n viewer,\n annotations,\n layer_nodes_name='nodes',\n layer_edges_name='edges',\n edge_color='white',\n ):\n \n if 'nodes_coords' in annotations.keys():\n add_nodes(viewer, annotations, name=layer_nodes_name)\n if 'edges_coords' in annotations.keys():\n add_edges(viewer, annotations, edge_color=edge_color, name=layer_edges_name)\n return", "def annotate_data(phage_id, file_method):\n UPLOAD_FOLDER = os.path.join(ROOT, 'users', phage_id, 'uploads')\n\n if request.method == \"GET\":\n if file_method == \"check\":\n return jsonify(check_blast_task(phage_id))\n elif file_method == \"blast\":\n return jsonify(add_blast_task(phage_id, 
UPLOAD_FOLDER))\n elif file_method == \"geneMap\":\n return jsonify(get_map(phage_id, UPLOAD_FOLDER))\n else:\n return jsonify(get_annotations_data(phage_id))\n\n if request.method == \"PUT\":\n return jsonify(add_cds(request, UPLOAD_FOLDER, phage_id))\n\n if request.method == \"POST\":\n return jsonify(share_with(phage_id, file_method))\n\n if request.method == \"OPTIONS\":\n if file_method == \"none\":\n return jsonify(get_settings(phage_id))\n else:\n return jsonify(update_settings(phage_id, file_method))", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def index():\n g.data['api_version'] = API_VERSION\n g.data['apilib_version'] = API_VERSION\n g.data['oar_version'] = VERSION\n g.data['links'] = []\n #endpoints = ('resources', 'jobs', 'config', 'admission_rules')\n endpoints = ('resources', 'jobs')\n for endpoint in endpoints:\n g.data['links'].append({\n 'rel': 'collection',\n 'href': url_for('%s.index' % endpoint),\n 'title': endpoint,\n })", "def PostAnnotationsStatus(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def annotationlabel(request,action=None):\n\n username = request.session['username']\n mode1 = request.session['mode']\n auto_required = request.GET.get('ns_id', None)\n mode = NameSpace.objects.get(ns_id=mode1)\n\n # print('mode',mode1)\n usecase = request.session['usecase']\n # language = request.GET.get('language',request.session['language'])\n type = 'labels'\n\n if request.method == 'GET' and action.lower() == 'user_labels':\n\n \"\"\"GET request: given the report, the labels annotated by the user are returned\"\"\"\n\n language = request.GET.get('language', request.session['language'])\n user_get = request.GET.get('username',username)\n report_id = request.GET.get('report_id')\n report1 = Report.objects.get(id_report = report_id,language = language)\n # if auto_required == 'Robot':\n # mode = NameSpace.objects.get(ns_id=auto_required)\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n json_dict = get_user_gt(user_get,mode_1,report1,language,'labels')\n return JsonResponse(json_dict,safe=False)\n\n elif request.method == 'GET' and action.lower() == 'all_labels':\n\n \"\"\" GET request: given the use case, all the labels associated to that usecase are returned. 
\"\"\"\n\n labels = AnnotationLabel.objects.filter(name=usecase).values('seq_number','label','annotation_mode')\n print(labels)\n json_dict = {}\n if len(labels) > 0:\n\n if mode1 == 'Human' or auto_required == 'Human':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Manual' in el['annotation_mode']:\n # if int(el['seq_number']) > count: # i primi 20 sono inseriti automaticamente\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n if mode1 == 'Robot' or auto_required == 'Robot':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Automatic' in el['annotation_mode']:\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n\n else:\n json_dict['labels'] = []\n\n json_dict['labels'] = sorted(json_dict['labels'], key=lambda json: json['seq_number'])\n print(json_dict)\n return JsonResponse(json_dict)\n\n elif request.method == 'POST' and action.lower() == 'delete':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are removed together with the\n associated groundtruth.\"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting parameters.'}\n return json_response\n to_del = Associate.objects.filter(username=user, ns_id=mode, id_report=report1, language=language)\n if mode1 == 'Human':\n try:\n with transaction.atomic():\n\n if to_del.exists():\n json_response = delete_all_annotation(to_del, user, report1,language, type,mode)\n\n else:\n json_response = {'msg':'nothing to do'}\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred saving the ground_truth and the labels'}\n return JsonResponse(json_response)\n else:\n return JsonResponse(json_response)\n else:\n json_response = restore_robot_annotation(report1, 'labels', user)\n return JsonResponse(json_response)\n\n\n if request.method == 'POST' and action.lower() == 'insert':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are added in the database and a new \n JSON groundtruth is created. \"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting the parameters.'}\n return JsonResponse(json_response)\n\n labels_to_save = request_body_json['labels']\n # In this case the user manually deletes all the labels (NOT WITH CLEAR BUTTON) and saves.\n if len(labels_to_save) == 0 and mode1 == 'Human':\n\n \"\"\"If there are not labels to save, if there is a ground truth saved in the database, this is removed,\n otherwise no action is performed. 
\"\"\"\n\n rows = Associate.objects.filter(username = user,ns_id=mode, id_report = report1, language = language)\n if rows.exists():\n try:\n with transaction.atomic():\n json_response = delete_all_annotation(rows,user,report1,language,type,mode)\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred.'}\n return JsonResponse(json_response, status=500)\n else:\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'Nothing to save.'}\n return JsonResponse(json_response)\n\n if len(labels_to_save) == 0 and mode1 == 'Robot':\n\n \"\"\" If there are not labels to save and the name space is Robot no action is performed and the already \n existing ground-truth is kept \"\"\"\n to_del = Associate.objects.filter(id_report=report1, language=language, username=user, ns_id=mode)\n # print('RESTORE')\n json_response = restore_robot_annotation(report1, 'labels',user)\n return JsonResponse(json_response)\n\n update = True\n\n \"\"\" Check if the user's labels she inserted are as many as the rows already present in the db: \n if they are not: update the annotation: the old annotation is replaced with the new one\n if they are: check if the labels existing are those inserted, in this case nothing is done, otherwise \n the current groundtruth is updated. \"\"\"\n\n existing_rows = Associate.objects.filter(username = user,ns_id=mode, id_report =report1,language =language)\n if existing_rows.exists():\n if existing_rows.count() == len(labels_to_save):\n for label in labels_to_save:\n label1 = AnnotationLabel.objects.get(name=usecase, label=label['label'], seq_number=label['seq_number'])\n if not Associate.objects.filter(username=user,ns_id=mode, seq_number=label1.seq_number, label=label1,\n id_report=report1, language=language).exists():\n update = True\n break\n else:\n update = False\n if update == True:\n try:\n with transaction.atomic():\n # Remove all the existing labels inserted by the user for that report. 
The existing ground truth is kept untile the deletion is successful\n to_del = Associate.objects.filter(username=user,ns_id=mode, id_report=report1,language = language)\n delete_all_annotation(to_del,user,report1,language,type,mode)\n\n json_resp_labels = update_annotation_labels(labels_to_save,usecase,user,report1,language,mode)\n\n jsonDict = serialize_gt(type, usecase, username, report_id,language,mode)\n GroundTruthLogFile.objects.create(username=user,ns_id=mode, id_report=report1, language = language,\n gt_json=jsonDict, gt_type=type,insertion_time=Now())\n\n except (Exception) as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred saving the ground_truth '\n 'and the labels, the transaction rolledback'}\n return JsonResponse(json_response)\n\n else:\n return JsonResponse(json_resp_labels)\n else:\n if mode1 == 'Human':\n if not GroundTruthLogFile.objects.filter(gt_type='labels', username=user, ns_id=mode, id_report=report1,\n language=language).exists():\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n json_response = {'message': 'ok'}\n else:\n json_response = {'message': 'no changes detected'}\n return JsonResponse(json_response)\n\n elif mode1 == 'Robot':\n\n \"\"\" In this section the name space Robot is handled: If the user is in the AUTOMATIC MODE and the labels\n she inserts are those annotated by the algorithm, this means that she agrees with the annotation of the \n Robot user. 
The annotation does not change, only the insertion time is changed.\"\"\"\n\n try:\n with transaction.atomic():\n # in questa sezione solo se la gt è uguale a prima, l'utente acconsente alla gt della macchina\n user_robot = User.objects.get(username='Robot_user', ns_id=mode)\n gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode,\n id_report=report1, language=language,\n gt_type='labels')\n\n gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels')\n if gt_robot.count() == 1 and not gt.exists():\n # if gt_robot[0].insertion_time == gt[0].insertion_time:\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels').delete()\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n except Exception as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred updating labels dates'}\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'dates updated'}\n return JsonResponse(json_response)", "def annotate(self, **annotations):\n _check_annotations(annotations)\n self.annotations.update(annotations)", "def annotate_one(f_json):\n logger.info(f_json + '--->')\n \n filename = os.path.basename(f_json).split('.')[0] \n \n f_out = os.path.join(cfg.OUTPUT_PATH, filename + cfg.OUTPUT_SUFFIX) \n \n if not cfg.OUTPUT_OVERWRITE_EXISTING:\n if os.path.exists(f_out):\n logger.info(f_out + ' already exists')\n return f_out\n \n f_out = annotate_with_geonames(f_json, f_out)\n logger.info(f_out)\n \n return f_out", "def ListAnnotations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_overview_annotations() -> dict:\n return {}", "def annotations(self, annotations):\n self._annotations = annotations", "def get_annotations(self):\n entity = self.get_object()\n serializer = AnnotationValueSerializer(entity.annotations.all(), many=True)\n return Response(serializer.data)", "def addAnnotations(self, sessionId, annotations):\n pass", "def api_index():\n func_list = {}\n for rule in app.url_map.iter_rules():\n if rule.endpoint != 'static':\n func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__\n return jsonify(func_list)", "def _annotations_to_targets(self, annotations):\n raise NotImplementedError('Implement this')", "def on_parsed(args):\n data = {\n 'application': args.application if args.application else ''\n }\n\n try:\n from akrr import akrrrestclient\n\n result = akrrrestclient.put(\n '/resources/{0}/on'.format(args.resource),\n data=data)\n if result.status_code == 200:\n message = 'Successfully enabled {0} -> {1}.\\n{2}' if args.application and 
args.resource \\\n else 'Successfully enabled all applications on {0}.\\n{1}'\n parameters = (args.application, args.resource, result.text) if args.application and args.resource \\\n else (args.resource, result.text)\n log.info(message.format(*parameters))\n else:\n log.error(\n 'something went wrong.%s:%s',\n result.status_code,\n result.text)\n except Exception as e:\n log.error('''\n An error occured while communicating\n with the REST API.\n %s: %s\n ''',\n e.args[0] if len(e.args) > 0 else '',\n e.args[1] if len(e.args) > 1 else '')", "def create_auto_annotations(request): # post\n\n request_body_json = json.loads(request.body)\n usecase_list = request_body_json['usecase']\n fields_list = request_body_json['selected']\n report_key = request_body_json['report_type']\n batch = request_body_json['batch']\n\n # check existence of examode labels and concepts\n\n if report_key == 'reports':\n for usecase in usecase_list:\n fields = []\n if fields_list != {}:\n if usecase in fields_list.keys():\n fields = list(set(fields_list[usecase]))\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'r') as use_outfile:\n json_to_ret = json.load(use_outfile)\n json_to_ret['extract_fields'][usecase] = fields\n # print(json_to_ret)\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'), 'w') as use_outfile:\n json.dump(json_to_ret,use_outfile)\n\n # print(fields)\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n # output_concepts_dir = os.path.join(workpath, './sket/outputs')\n # for root, dirs, files in os.walk(output_concepts_dir):\n # for f in files:\n # os.unlink(os.path.join(root, f))\n # for d in dirs:\n # shutil.rmtree(os.path.join(root, d))\n\n bool_val,error = create_auto_gt_1(usecase,fields,report_key,batch)\n if bool_val == False:\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'r') as use_outfile:\n json_to_ret = json.load(use_outfile)\n json_to_ret['extract_fields'][usecase] = []\n # print(json_to_ret)\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'w') as use_outfile:\n json.dump(json_to_ret, use_outfile)\n json_resp = {'error': error}\n return JsonResponse(json_resp)\n\n elif report_key == 'pubmed':\n for usecase in usecase_list:\n fields = ['title','abstract']\n # workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n # output_concepts_dir = os.path.join(workpath, './sket/outputs')\n # for root, dirs, files in os.walk(output_concepts_dir):\n # for f in files:\n # os.unlink(os.path.join(root, f))\n # for d in dirs:\n # shutil.rmtree(os.path.join(root, d))\n\n bool_val, error = create_auto_gt_1(usecase, fields, report_key, batch)\n if bool_val == False:\n json_resp = {'error': error}\n return JsonResponse(json_resp)\n\n json_resp = {'msg':'ok'}\n return JsonResponse(json_resp)", "def run(self):\n logging.info('Perform automatic annotations')\n\n # The procedure outcome report file.\n self.__param.set_report_filename(self.__log_report.get_filename())\n self.__log_report.increment()\n\n # Create the progress bar then run the annotations\n wx.BeginBusyCursor()\n p = sppasAnnotProgressDialog()\n self.__manager.annotate(self.__param, p)\n p.close()\n wx.EndBusyCursor()\n\n self.__update_log_text()\n self.Refresh()\n\n # send to parent\n evt = 
DataChangedEvent(data=self.__param.get_workspace())\n evt.SetEventObject(self)\n wx.PostEvent(self.GetParent(), evt)", "def save_annotations(self):\n r = requests.get(\n f'{self.api_host}/v1/entity-annotations?'\n f'annotation_type=Source reliability (binary)&size=100',\n headers=self.get_request_headers()\n )\n\n entity_annotations = r.json().get('entity_annotations')\n\n for annotation in entity_annotations:\n annotation_id = annotation.get('entity_id')\n with open(\n f'{self.data_folder}/annotations/{annotation_id}.json',\n 'w'\n ) as f:\n json.dump(annotation, f)" ]
[ "0.59798527", "0.5450754", "0.5424835", "0.54219246", "0.52403", "0.51730776", "0.51689434", "0.5130103", "0.50847834", "0.5067602", "0.5056875", "0.5036574", "0.5017098", "0.50098765", "0.4997202", "0.49937025", "0.4971913", "0.4971564", "0.49649894", "0.49378031", "0.49359307", "0.49355203", "0.49166712", "0.48702928", "0.48635113", "0.48503366", "0.48349565", "0.48314014", "0.48130682", "0.48066592" ]
0.55500025
1
create an local command Test Runner object create the log directory
def start_local_cmd(log_path, log_name="localcmd"): log_dir = os.path.abspath(log_path) try: os.makedirs(log_dir) except OSError: pass return CmdRunner.CmdRunner(log_dir, log_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.path = tempfile.mkdtemp()\n self.log = log.Log(self.path)", "def init_log():\n os.system('rm -rf /target/testdriver.log || true')\n os.system('touch /target/testdriver.log')\n os.system(f\"chown {uid_gid_output} /target/testdriver.log\")\n os.system('chmod 664 /target/testdriver.log')", "def start_cmd_list(self, log_path, testsuite, prefix):\n log_dir_orte = os.path.abspath(log_path)\n try:\n os.makedirs(log_dir_orte)\n except OSError:\n pass\n return (OrteRunner.OrteRunner(self.test_info, log_dir_orte,\n testsuite, prefix))", "def setup_class(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"agent_1\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)", "def _setup_dir(self):\n if not os.path.exists(self._save_dir):\n logger.info(\"save_dir {} does not exist, \"\n \"creating it\".format(self._save_dir))\n os.makedirs(self._save_dir)\n\n # Log the run parameters.\n logger.info(\"Writing logs to {}\".format(self._log_dir))\n\n if not os.path.exists(self._log_dir):\n logger.info(\"log path {} does not exist, \"\n \"creating it\".format(self._log_dir))\n os.makedirs(self._log_dir)", "def _create_paths(self):\r\n\r\n # Copying the file 'PCU_logs.robot' to the folder with test suites.\r\n if not os.path.exists('\\\\'.join([self.path, self.log_test])):\r\n shutil.copy(self.log_test, self.path)\r\n\r\n # Moving to test suites directory\r\n os.chdir(self.path)\r\n\r\n # Create a directory for the test suite\r\n if not os.path.exists(self.output_dir_path):\r\n os.makedirs(self.output_dir_path)", "def get_test_logdir(node=None, init=False):\n cur_test = os.environ['PYTEST_CURRENT_TEST']\n\n ret = '/tmp/topotests/' + cur_test[0:cur_test.find(\".py\")].replace('/','.')\n if node != None:\n dir = ret + \"/\" + node\n if init:\n os.system('mkdir -p ' + dir)\n os.system('chmod -R go+rw /tmp/topotests')\n return ret", "def logger_test():\n test_logger = Logger(True)\n test_dir = r'{}/logger_test'.format(os.getcwd())\n header = ['x', 'y', 'z']\n test_logger.new('test', header)\n for i in range(10):\n data = np.random.random((3,))\n test_logger.add('test', data)\n test_logger.save('test', test_dir)", "def start_test_exec(cls):\n time_str = cls.get_current_time()\n os.system(\"robot -l ./logs/log_{0}.html -r ./logs/report_{0}.html -o ./logs/output_{0}.xml \\\n ./test_suite/{1}\".format(time_str, test_suite))", "def __init__(self):\r\n self.file_object = './ExecutionLogs/PredictFromModel.log'\r\n\r\n \"\"\" Initialize logger class for log writing \"\"\"\r\n self.log_writer = logger.logger(self.file_object)", "def CreateLoggingDirectories(\n dataset_root: Path, model_name: str, analysis: str, run_id: str = None\n) -> Path:\n run_id = run_id or time.strftime(\"%y:%m:%dT%H:%M:%S\")\n log_dir = dataset_root / \"logs\" / model_name / analysis / run_id\n if log_dir.is_dir():\n raise OSError(\n f\"Logs directory already exists. 
Refusing to overwrite: {log_dir}\"\n )\n logging.info(\"Writing logs to %s\", log_dir)\n log_dir.mkdir(parents=True)\n (log_dir / \"epochs\").mkdir()\n (log_dir / \"checkpoints\").mkdir()\n (log_dir / \"graph_loader\").mkdir()\n return log_dir", "def pytest_logger_logsdir(self, config):", "def test_log_file_created(self, mock_parsing_handler, mock_api_handler, mock_progress):\n\n directory = path.join(path_to_module, \"fake_ngs_data\")\n directory_status = DirectoryStatus(directory)\n log_file = path.join(directory, \"irida-uploader.log\")\n # Check that log file does not exist before starting\n self.assertFalse(path.exists(log_file))\n\n cli_entry._validate_and_upload(directory_status, False)\n\n # Make sure log file is created\n self.assertTrue(path.exists(log_file))", "def begin(self):\n os.mkdir(self.meta)\n\n self.logname = os.path.join(self.rundir, self.meta, 'log')\n self.logfile = open(self.logname, 'a')\n if settings.verbosity >= 3:\n self.logfile = Tee(self.logfile)\n\n if self.test.setup:\n self.setup_script = self._make_setup_script()\n self.steps_script = self._make_steps_script()\n if self.test.teardown:\n self.teardown_script = self._make_teardown_script()", "def run(project, logger, cmd_name, command):\n dir_logs = project.expand('$dir_logs')\n pybuilder.utils.mkdir(dir_logs)\n out_file = os.path.join(dir_logs, '{0}.log'.format(cmd_name))\n err_file = os.path.join(dir_logs, '{0}.err'.format(cmd_name))\n with open(out_file, 'w') as out:\n with open(err_file, 'w') as err:\n retcode = subprocess.call(command, shell=True, stdout=out, stderr=err)\n if retcode:\n logger.error(\"{2} failed. See {0} and {1} for details.\"\n .format(out_file, err_file, cmd_name))\n raise Exception(\"{0} Failed\".format(cmd_name))", "def setUp(self):\n self._dir = tempfile.mkdtemp(prefix=f\"miniwdl_test_{self.id()}_\")", "def new_custom_log_dir(self) -> str:", "def __init__(self, level, general_log_path, outputs_folder):\n self.log_level = level\n\n # self.general_log_file = general_log_path.open('w')\n self.general_log_file = GCOpen(general_log_path, 'w')\n self.general_log_file.open()\n\n self.file_outputs_dir = outputs_folder / 'output_files'\n # self.file_outputs_dir.mkdir(exist_ok=True)\n\n exp_name = str(outputs_folder).split('/')[-1]\n\n self.summary_writer = SummaryWriter(log_dir=str(TEMP_FOLDER),\n filename_suffix='.' 
+ exp_name)\n tf_filename = find_tf_event(exp_name)\n self.sw_local_path = Path(TEMP_FOLDER) / tf_filename\n self.sw_gc_path = outputs_folder / tf_filename\n\n self.log(\"Starting new experiment at \" +\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.log(\"User: \" + getpass.getuser())\n self.log(\"Host: \" + socket.gethostname())\n\n Logger.unique_logger = self", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def __init__(self, dataSetDirectory=None, verbose=True):\n\n # sys.stdout = open(logDirectory+\"2.txt\",'w')\n\n self._dataSetDirectory = dataSetDirectory\n\n self._verbose = verbose\n\n self._actions = []", "def __init__(self,\n logdir,\n mode='a',\n delete=False,\n clearmem=True):\n Logger.__init__(self)\n self.logdir = logdir\n self.mode = mode\n self.delete = delete\n self.clearmem = clearmem\n if not os.path.exists(self.logdir):\n os.mkdir(self.logdir)", "def create_instance(test_id, config, args):\n return TestLogs(test_id, config, args)", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()\n if not os.path.exists(self.workspace_dir):\n os.makedirs(self.workspace_dir)", "def setup_class(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"myagent\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n # copy the 'packages' directory in the parent of the agent folder.\n shutil.copytree(Path(CUR_PATH, \"..\", \"packages\"), Path(cls.t, \"packages\"))\n cls.connection_id = str(HTTP_CLIENT_PUBLIC_ID)\n cls.connection_name = \"http_client\"\n\n os.chdir(cls.t)\n result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, \"init\", \"--author\", AUTHOR])\n assert result.exit_code == 0\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"create\", \"--local\", cls.agent_name],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n os.chdir(cls.agent_name)\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"add\", \"--local\", \"connection\", cls.connection_id],\n standalone_mode=False,\n )\n assert result.exit_code == 0" ]
[ "0.65183616", "0.6403981", "0.6274537", "0.6198722", "0.6148515", "0.6137054", "0.610452", "0.60814726", "0.6077535", "0.6061433", "0.60586435", "0.59921736", "0.59883654", "0.59766644", "0.5950229", "0.59476614", "0.59351313", "0.5928388", "0.59242725", "0.59242725", "0.59242725", "0.59242725", "0.59242725", "0.59242725", "0.59242725", "0.59230024", "0.58923256", "0.58879864", "0.5875135", "0.58660835" ]
0.6608124
0
Export CTSDG generator for inference
def export_ctsdg(cfg): generator = Generator( image_in_channels=config.image_in_channels, edge_in_channels=config.edge_in_channels, out_channels=config.out_channels ) generator.set_train(False) load_checkpoint(cfg.checkpoint_path, generator) ckpt_path = Path(cfg.checkpoint_path) output_file_name = (ckpt_path.parent / ckpt_path.stem).as_posix() file_format = config.file_format img_dummy = mnp.zeros([1, config.image_in_channels, *cfg.image_load_size], dtype=mstype.float32) edge_dummy = mnp.zeros([1, 2, *cfg.image_load_size], dtype=mstype.float32) mask_dummy = mnp.zeros([1, 1, *cfg.image_load_size], dtype=mstype.float32) export(generator, img_dummy, edge_dummy, mask_dummy, file_name=output_file_name, file_format=file_format) print(f'{output_file_name}.mindir exported successfully!', flush=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_generator(\n ckpt, is_stylegan1, G_res, out_size, noconst, latent_dim, n_mlp, channel_multiplier, dataparallel, base_res_factor\n):\n if is_stylegan1:\n generator = G_style(output_size=out_size, checkpoint=ckpt).cuda()\n else:\n generator = Generator(\n G_res,\n latent_dim,\n n_mlp,\n channel_multiplier=channel_multiplier,\n constant_input=not noconst,\n checkpoint=ckpt,\n output_size=out_size,\n base_res_factor=base_res_factor,\n ).cuda()\n if dataparallel:\n generator = th.nn.DataParallel(generator)\n return generator", "def generate():", "def main():\n parsed_args = parse_args()\n dfg = DummyFileGenerator(parsed_args[0], **parsed_args[1])\n dfg.write_output_file(**parsed_args[2])", "def build_gan(\n optimizer,\n timesteps,\n vocab_sizes,\n latlon_dense_units=64,\n concat_dense_units=100,\n lstm_units=100,\n latent_dim=100,\n lstm_reg=0.02,\n):\n gen = build_generator(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n lstm_units,\n latent_dim,\n lstm_reg,\n vocab_sizes,\n )\n dis = build_discriminator(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n lstm_units,\n latent_dim,\n lstm_reg,\n vocab_sizes,\n )\n # Compile discriminator with masked BCE loss. Mask is last output of generator\n dis.compile(optimizer=optimizer, loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n dis.trainable = False\n\n # The trajectory generator takes real trajectories and noise as inputs\n # inputs = [layers.Input(shape=(timesteps, 2), name=\"input_latlon\")]\n # for key, val in vocab_sizes.items():\n # inputs.append(layers.Input(shape=(timesteps, val), name=\"input_\" + key))\n # inputs.append(layers.Input(shape=(latent_dim,), name=\"input_noise\"))\n # inputs.append(layers.Input(shape=(timesteps, 1), name=\"input_mask\"))\n # gen_trajs = gen(inputs)\n # y_pred = dis(gen_trajs[:-1])\n # mask = inputs[-1]\n # gan = Model(inputs, y_pred)\n # gan.add_loss(traj_loss(inputs[:-2], gen_trajs[:-1], mask))\n ##\n y_pred = dis(gen.outputs[:-1])\n gan = Model(gen.inputs, y_pred)\n mask = gen.inputs[-1]\n gan.add_loss(traj_loss(gen.inputs[:-2], gen.outputs[:-1], mask))\n gan.compile(optimizer=optimizer, loss=\"binary_crossentropy\")\n return gen, dis, gan", "def generate(env):\n## doxyfile_scanner = env.Scanner(## DoxySourceScan,\n## \"DoxySourceScan\",\n## scan_check = DoxySourceScanCheck,\n##)\n\n if targz.exists(env):\n srcdist_builder = targz.makeBuilder(srcDistEmitter)\n\n env['BUILDERS']['SrcDist'] = srcdist_builder", "def testgen(self):\n self.parse()\n self.generate()", "def test_gen():\n tpot_obj = TPOTClassifier()\n\n pipeline = tpot_obj._gen_grow_safe(tpot_obj._pset, 1, 3)\n\n assert len(pipeline) > 1\n assert pipeline[0].ret == Output_DF", "def generate(self):\r\n raise NotImplementedError", "def create_gens(train_path, gen):\n\n _logger.debug(\"Creating Data Generators\")\n image_files = glob(train_path + '/*/*.jp*g')\n try:\n train_generator = gen.flow_from_directory(\n train_path,\n target_size=input_size,\n shuffle=True,\n batch_size=batch_size,\n subset = \"validation\"\n )\n test_generator = gen.flow_from_directory(\n train_path,\n target_size=input_size,\n shuffle=True,\n batch_size=batch_size,\n subset = \"training\"\n )\n class_indices = train_generator.class_indices\n except FileNotFoundError:\n _logger.error(\"data generators invalid\")\n train_generator = None\n test_generator = None\n image_files = None\n class_indices= None\n return train_generator, test_generator, image_files, class_indices", "def example_generator(self, mode: str):\n raise 
NotImplementedError", "def generate(self):\n pass", "def create_model(opts):\n G_XtoY = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights)\n G_YtoX = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights)\n D_X = DCDiscriminator(conv_dim=opts.d_conv_dim)\n D_Y = DCDiscriminator(conv_dim=opts.d_conv_dim)\n\n return G_XtoY, G_YtoX, D_X, D_Y", "def CycleGAN(g_conv_dim=64, d_conv_dim=64, n_res_blocks=6):\n \n # Instantiate generators\n G_XtoY = Generator(conv_dim=g_conv_dim, n_res_blocks=n_res_blocks)\n G_YtoX = Generator(conv_dim=g_conv_dim, n_res_blocks=n_res_blocks)\n # Instantiate patch discriminators\n Dp_X = PatchDiscriminator(conv_dim=d_conv_dim)\n Dp_Y = PatchDiscriminator(conv_dim=d_conv_dim)\n # Instantiate global discriminators\n Dg_X = GlobalDiscriminator(conv_dim=d_conv_dim)\n Dg_Y = GlobalDiscriminator(conv_dim=d_conv_dim)\n\n # move models to GPU, if available\n cuda_available = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if cuda_available else \"cpu\")\n\n device = torch.device(device)\n G_XtoY.to(device)\n G_YtoX.to(device)\n Dp_X.to(device)\n Dp_Y.to(device)\n Dg_X.to(device)\n Dg_Y.to(device)\n\n print('Using {}.'.format(\"GPU\" if cuda_available else \"CPU\"))\n return G_XtoY, G_YtoX, Dp_X, Dp_Y, Dg_X, Dg_Y", "def generate(self):", "def generate(args):\n # Define latent space dataset and generator model.\n uniform_dataloader, generator = get_datasets_and_generator(args, no_target=True)\n # generator.to(device)\n\n with torch.no_grad():\n generator.load_state_dict(torch.load(args.model_path))\n for input_ in uniform_dataloader:\n # Model forward pass.\n output = generator(input_.float())\n if len(output) > 1:\n print('\\t'.join(map(str, output.squeeze().tolist())))\n print(output.item())", "def generate(self):\n return self.gen.generate()", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "async def gpt2_generate(self, ctx, *, arg=''):\n print('Command gpt2_generate triggered')\n if gpt2.is_gpt2_downloaded(model_name=self.config['model_name']):\n generate_args = parse_generate_arguments(self.config)\n await ctx.send(\"Generating...\")\n sample = gpt2.generate(self.sess, prefix=arg, return_as_list=True, **generate_args)[0]\n await ctx.send(sample)\n else:\n await ctx.send(f\"ERROR: Model {self.config['model_name']} is not downloaded\")", "def __init__(self, gen):\n self.gen = gen", "def generate(self, di):\n raise NotImplementedError", "def generator(self, inpt, reuse, is_train):\n h, w, c = self.input_dim\n with tf.variable_scope(\"generator\"):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n net = dense_layer(inpt, self.g_init*h**2/32, activation=tf.nn.relu,\n name=\"dense_input\", use_bn=True, is_train=is_train, stddv=self.stddv)\n # reshape the output of dense layer to be H/16, W/16, K*8\n net = tf.reshape(net, (-1, h//16, w//16, self.g_init*8))\n net = transpose_conv2d(net, (self.batch_size, h//8, w//8, self.g_init*4), name=\"trans_conv1\",\n is_train=is_train, padding=\"SAME\", stddv=self.stddv)\n net = transpose_conv2d(net, (self.batch_size, h//4, w//4, self.g_init*2), is_train=is_train,\n padding=\"SAME\", stddv=self.stddv)\n net = transpose_conv2d(net, (self.batch_size, h//2, w//2, self.g_init), name=\"trans_conv3\",\n is_train=is_train, padding=\"SAME\", stddv=self.stddv)\n net = transpose_conv2d(net, (self.batch_size, h, w, c), name=\"trans_conv4\", is_train=is_train,\n activation=tf.nn.tanh, padding=\"SAME\", stddv=self.stddv)\n return 
net", "def sample_generator(self, sess):\n\n to_return = {\n 'g_sample': self.G_sample_test,\n }\n return sess.run(to_return)", "def get_generator_class(self) -> Any:", "def get_generator(name, device):\n if name == \"dcgan_rand\":\n generator = transformations.dcgan_cifar10(device, random=True)\n elif name == \"dcgan_cifar10\":\n generator = transformations.dcgan_cifar10(device)\n elif name == \"dcgan_cifar100_grey\":\n generator = transformations.dcgan_cifar100_grey(device)\n elif name == \"nvp_cifar10\":\n generator = transformations.nvp_cifar10(device)\n else:\n raise ValueError(\"Did not recognise the generator here, will exit now.\")\n\n return generator", "def generator(self, x, reuse=None, name=\"\"):\n with tf.variable_scope('generator-%s' % name, reuse=reuse):\n\n def d(x, f, name=''):\n x = t.conv2d(x, f=f, k=3, s=2, name='gen-d-conv2d-%s' % name)\n x = t.instance_norm(x, name='gen-d-ins_norm-%s' % name)\n x = tf.nn.relu(x)\n return x\n\n def R(x, f, name=''):\n x = t.conv2d(x, f=f, k=3, s=1, name='gen-R-conv2d-%s-0' % name)\n x = t.conv2d(x, f=f, k=3, s=1, name='gen-R-conv2d-%s-1' % name)\n x = t.instance_norm(x, name='gen-R-ins_norm-%s' % name)\n x = tf.nn.relu(x)\n return x\n\n def u(x, f, name=''):\n x = t.deconv2d(x, f=f, k=3, s=2, name='gen-u-deconv2d-%s' % name)\n x = t.instance_norm(x, name='gen-u-ins_norm-%s' % name)\n x = tf.nn.relu(x)\n return x\n\n x = t.conv2d(x, f=self.gf_dim, k=7, s=1, name='gen-conv2d-0')\n\n x = d(x, self.gf_dim * 2, name='1')\n x = d(x, self.gf_dim * 4, name='2')\n\n for i in range(1, 7):\n x = R(x, self.gf_dim * 4, name=str(i))\n\n x = u(x, self.gf_dim * 4, name='1')\n x = u(x, self.gf_dim * 2, name='2')\n\n logits = t.conv2d(x, f=3, k=7, s=1, name='gen-conv2d-1')\n prob = tf.nn.tanh(logits)\n\n return prob", "def generate(self):\n raise NotImplementedError", "def task_generate_sc_figure2():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [\n dept.census_tracts_path,\n dept.police_precincts_path,\n ],\n 'targets': [dept.sc_figure2_path],\n 'actions': [dept.generate_sc_figure2],\n 'clean': True,\n }", "def generate(args):\n\n # Using the data Augmentation in traning data\n\n normalizer = Normalizer()\n\n train_aug = tf.keras.preprocessing.image.ImageDataGenerator(\n #rescale=1. 
/ 255.,\n shear_range=args.shear_range,\n zoom_range=args.zoom_range,\n rotation_range=args.rotation_range,\n width_shift_range=args.width_shift_range,\n height_shift_range=args.height_shift_range,\n horizontal_flip=args.horizontal_flip,\n vertical_flip=args.vertical_flip,\n preprocessing_function=normalizer)\n\n\n validation_aug = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=normalizer)\n\n train_generator = train_aug.flow_from_directory(\n args.train_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical',\n shuffle=True)\n\n mean, std = [], []\n if args.mean is None or args.std is None:\n mean, std = normalizer.get_stats(args.train_dir, train_generator.filenames, (args.input_size, args.input_size))\n else:\n mean = [float(m.strip()) for m in args.mean.split(',')]\n std = [float(s.strip()) for s in args.std.split(',')]\n normalizer.set_stats(mean, std)\n\n if not os.path.exists('model'):\n os.makedirs('model')\n with open('model/stats.txt', 'w') as stats:\n stats.write(\"Dataset mean [r, g, b] = {}\\n\".format(mean))\n\n\n label_map = (train_generator.class_indices)\n label_map = dict((v,k) for k,v in label_map.items())\n\n with open('model/labels.csv', 'w') as csv_file:\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n csv_writer.writerows(label_map.items())\n\n validation_generator = validation_aug.flow_from_directory(\n args.validation_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical')\n\n return train_generator, validation_generator, train_generator.samples, validation_generator.samples, len(label_map)" ]
[ "0.6295087", "0.61204", "0.5873329", "0.5787015", "0.57816166", "0.5729231", "0.5703594", "0.5680445", "0.5666309", "0.56184757", "0.5589947", "0.55868477", "0.5584511", "0.5568219", "0.55620843", "0.5538686", "0.5524669", "0.5524669", "0.5524669", "0.5520368", "0.5519996", "0.5516941", "0.5510894", "0.5500409", "0.5492095", "0.5487716", "0.5435401", "0.543097", "0.54198134", "0.54164886" ]
0.6933986
0
Calculate the disparity value at each pixel by searching a small patch around a pixel from the left image in the right image.
def calculate_disparity_map( left_img: torch.Tensor, right_img: torch.Tensor, block_size: int, sim_measure_function: Callable, max_search_bound: int = 50, ) -> torch.Tensor: assert left_img.shape == right_img.shape (H,W,C) = left_img.shape H_offset = block_size//2 W_offset = block_size//2 disp_map = torch.zeros(H-2*H_offset,W-2*W_offset) # placeholder, this is not the actual size ########################################################################### # Student code begins ########################################################################### for ii in range(H-2*H_offset): for jj in range(W-2*W_offset): left_patch = left_img[ii:ii+block_size,jj:jj+block_size, :] similarity_error_array = -1 * np.ones(max_search_bound) for kk in range(max_search_bound): jj_start = max(jj - kk, 0) jj_end = max(jj - kk + block_size, block_size) right_patch = right_img[ii:ii+block_size, jj_start:jj_end, :] similarity_error_array[kk] = sim_measure_function(left_patch, right_patch) disp_map[ii,jj] = np.argmin(similarity_error_array) ########################################################################### # Student code ends ########################################################################### return disp_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_disparity(self, imgL, imgR):\n # SGBM Parameters -----------------\n window_size = 1 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n param = {'minDisparity': 0, 'numDisparities': 32, 'blockSize': 5, 'P1': 10, 'P2': 20, 'disp12MaxDiff': 1,\n 'preFilterCap': 65, 'uniquenessRatio': 10, 'speckleWindowSize': 150, 'speckleRange': 2, 'mode': 2}\n left_matcher = cv2.StereoSGBM_create(**param)\n # left_matcher = cv2.StereoSGBM_create(\n # minDisparity=-1,\n # numDisparities=5*16, # max_disp has to be dividable by 16 f. E. HH 192, 256\n # blockSize=window_size,\n # P1=8 * 3 * window_size,\n # # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n # P2=32 * 3 * window_size,\n # disp12MaxDiff=12,\n # uniquenessRatio=10,\n # speckleWindowSize=50,\n # speckleRange=32,\n # preFilterCap=63,\n # mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY\n # )\n right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)\n # FILTER Parameters\n lmbda = 8000\n sigma = 1.3\n wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)\n wls_filter.setLambda(lmbda)\n\n wls_filter.setSigmaColor(sigma)\n displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16\n dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16\n displ = np.int16(displ)\n dispr = np.int16(dispr)\n filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put \"imgL\" here!!!\n filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)\n filteredImg = np.uint8(filteredImg)\n # 除以16得到真实视差(因为SGBM算法得到的视差是×16的)\n displ[displ < 0] = 0\n # disparity.astype(np.float32) / 16.\n displ = np.divide(displ.astype(np.float32), 16.)\n return filteredImg, displ", "def disparitySSD(img_l: np.ndarray, img_r: np.ndarray, disp_range: (int, int), k_size: int) -> np.ndarray:\r\n kernel_half = int ((k_size*2 + 1) //2)\r\n w , h = img_r.shape\r\n # the depth of the image\r\n depth = np.zeros((w , h))\r\n for y in range (kernel_half, (w - kernel_half)): # iterate through the rows\r\n for x in range(kernel_half, (h - kernel_half)): # iterate through the columns\r\n best_offset = 0\r\n pixel = 0\r\n prev_ssd = 654354\r\n for offset in range(disp_range[0], disp_range[1]): # check the kernel which is exit in this range\r\n ssd = 0\r\n for v in range(-kernel_half, kernel_half):\r\n for u in range(-kernel_half , kernel_half):\r\n # calculate the difference between the left and right kernel and then make the disp point to be\r\n # the the offset with the minimum SSD (Sum of square difference)\r\n # arg_min =>(I_left(x , y) - I_right (x + v, y +u))^2\r\n ssd += (img_r [y+v, x+u] - img_l[(y + v), (x + u) - offset])**2\r\n if ssd < prev_ssd:\r\n prev_ssd = ssd\r\n best_offset = offset\r\n\r\n depth[y, x] = best_offset\r\n\r\n print(depth)\r\n\r\n return depth\r\n pass", "def image_pre_filtering(left_img: np.ndarray, right_img: np.ndarray) -> tuple:\n\n def clahe(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Contrast Limited Adaptive Histogram Equalization\n :param image: the image to be filtered\n :return: the image filtered with CLAHE\n \"\"\"\n clahe_filter = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n return clahe_filter.apply(image)\n\n def logarithmic(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Logarithmic Transform\n :param image: the image to be filtered\n :return: the image filtered with logarithmic transform\n \"\"\"\n c = 
max_disparity / math.log(1 + np.max(image))\n sigma = 1\n for i in range(0, image.shape[1]): # image width\n for j in range(0, image.shape[0]): # image height\n # compute logarithmic transform\n image[j, i] = int(c * math.log(1 + ((math.exp(sigma) - 1) * image[j, i])))\n return image\n\n def exponential(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform pre-processing - raise to the power, as this subjectively appears\n to improve subsequent disparity calculation\n :param image:\n :return:\n \"\"\"\n return np.power(image, 0.75).astype('uint8')\n\n def apply_filter(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Choose which filter to apply to both images, this could be a combination too\n :param image: the image to be filtered\n :return:\n \"\"\"\n # choose filters to apply\n return clahe(image)\n\n return apply_filter(left_img), apply_filter(right_img)", "def calculate_cost_volume(\n left_img: torch.Tensor,\n right_img: torch.Tensor,\n max_disparity: int,\n sim_measure_function: Callable,\n block_size: int = 9,\n):\n # placeholders\n H = left_img.shape[0]\n W = right_img.shape[1]\n H_offset = block_size//2\n W_offset = block_size//2\n cost_volume = torch.ones(H, W, max_disparity) * 255\n\n ###########################################################################\n # Student code begins\n ###########################################################################\n\n for ii in range(H-2*H_offset):\n for jj in range(W-2*W_offset):\n left_patch = left_img[ii:ii+block_size,jj:jj+block_size, :]\n similarity_error_array = np.ones(max_disparity)\n \n for kk in range(max_disparity):\n if jj - kk >= 0:\n jj_start = max(jj - kk, 0)\n jj_end = max(jj - kk + block_size, block_size)\n\n right_patch = right_img[ii:ii+block_size, jj_start:jj_end, :]\n similarity_error_array[kk] = sim_measure_function(left_patch, right_patch)\n else:\n similarity_error_array[kk] = 255.0\n\n cost_volume[ii+H_offset,jj+W_offset,:] = torch.tensor(similarity_error_array)\n\n ###########################################################################\n # Student code ends\n ###########################################################################\n\n return cost_volume", "def measure_curvature(self, warped, leftx, rightx):\n\t\t# Define conversions in x and y from pixels space to meters\n\t\t#xm_per_pix = 3.7/warped.shape[1] # meters per pixel in x dimension\n\t\t#ym_per_pix = 30.0/warped.shape[0] # meters per pixel in y dimension\n\t\txm_per_pix = 3.7/700 # meters per pixel in x dimension\n\t\tym_per_pix = 30.0/720 # meters per pixel in y dimension\n\t\t# Generate some fake data to represent lane-line pixels\n\t\tploty = np.linspace(0, 719, num=720) # to cover same y-range as image\n\t\t# Fit second order polynomials to x, y in world space\n\t\tleft_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)\n\t\tright_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)\n\t\t# Define y-value where we want radius of curvature\n\t\t# Choose the maximum y-value, corresponding to the bottom of the image\n\t\ty_eval = np.max(ploty)\n\t\t# Calculate radius of fitted curvature\n\t\tleft_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])\n\t\tright_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])\n\t\t# Calculate the lane deviation\n\t\tlane_deviation = self.lane_deviation(warped, xm_per_pix)\n\n\t\treturn left_curverad, right_curverad, lane_deviation", "def 
computePSNR(img1, img2, pad_y=0, pad_x=0):\n if pad_y != 0 and pad_x != 0:\n img1_u = (np.clip(img1, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n else:\n img1_u = (np.clip(img1, 0, 255.0)).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)).astype(dtype=np.uint8)\n imdiff = (img1_u).astype(dtype=np.float32) - (img2_u).astype(dtype=np.float32)\n rmse = np.sqrt(np.mean(np.power(imdiff[:], 2)))\n return 20.0 * np.log10(255.0 / rmse)", "def depth_rendering(ref_view, disparity_map, lf_size = (64, 512, 512, 3)):\n lf_one_way = int(math.floor(math.sqrt(lf_size[0])))\n\n x_indices = np.arange(lf_size[1])\n y_indices = np.arange(lf_size[2])\n b_indices = np.arange(lf_size[0])\n\n #Create a grid of size lf_size[:3] consisting of the pixel co ordinates of each image\n _, x, y = np.meshgrid(b_indices, x_indices, y_indices, indexing= 'ij')\n\n # Create a grid of size (lf_size[0], 2) consiting of the row, col lf positions\n grid = np.meshgrid(np.arange(lf_one_way), np.arange(lf_one_way), indexing= 'ij')\n stacked = np.stack(grid, 2)\n positions = stacked.reshape(-1, 2)\n\n # Compute the distance from each lf position from the reference view\n # Repeat the elements of this to match the size of the disparity map\n ref_pos = np.array(\n [lf_one_way // 2, lf_one_way // 2])\n distance = (np.tile(ref_pos, (lf_size[0], 1)) - positions).T\n dis_repeated = np.repeat(distance, lf_size[1] * lf_size[2], axis = 1)\n dis_repeated = dis_repeated.reshape(2, lf_size[0], lf_size[1], lf_size[2])\n\n\n # Tile the disparity map so that there is one for each lf_position - lf_size[0]\n tiled_map = np.tile(disparity_map, (lf_size[0], 1, 1))\n\n # Compute the shifted pixels\n x_shifted = (x.astype(np.float32) - tiled_map * dis_repeated[0]).flatten()\n y_shifted = (y.astype(np.float32) - tiled_map * dis_repeated[1]).flatten()\n\n #indices for linear interpolation in a square around the central point\n x_low = np.around(x_shifted).astype(int)\n #x_high = x_low + 1\n\n y_low = np.around(y_shifted).astype(int)\n #y_high = y_low + 1\n\n #Place co-ordinates outside the image back into the image\n x_low_clip = np.clip(x_low, 0, ref_view.shape[0] - 1)\n #x_high_clip = np.clip(x_high, 0, ref_view.shape[0] - 1)\n y_low_clip = np.clip(y_low, 0, ref_view.shape[1] - 1)\n #y_high_clip = np.clip(y_high, 0, ref_view.shape[1] - 1)\n\n #Gather the interpolation points\n interp_pts_1 = np.stack((x_low_clip, y_low_clip))\n #interp_pts_2 = np.stack((x_low_clip, y_high_clip))\n #interp_pts_3 = np.stack((x_high_clip, y_low_clip))\n #interp_pts_4 = np.stack((x_high_clip, y_high_clip))\n\n #Index into the images\n desired_shape = lf_size\n res_1 = torch_big_sample(ref_view, interp_pts_1, desired_shape)\n return res_1\n res_2 = torch_big_sample(ref_view, interp_pts_2, desired_shape)\n res_3 = torch_big_sample(ref_view, interp_pts_3, desired_shape)\n res_4 = torch_big_sample(ref_view, interp_pts_4, desired_shape)\n\n #Compute interpolation weights\n x_low_f = x_low.astype(np.float32)\n d_x_low = 1.0 - (x_shifted.astype(np.float32) - x_low_f)\n d_x_high = 1.0 - d_x_low\n y_low_f = y_low.astype(np.float32)\n d_y_low = 1.0 - (y_shifted.astype(np.float32) - y_low_f)\n d_y_high = 1.0 - d_y_low\n\n w1 = torch.from_numpy(d_x_low * d_y_low)\n w2 = torch.from_numpy(d_x_low * d_y_high)\n w3 = torch.from_numpy(d_x_high * d_y_low)\n w4 = torch.from_numpy(d_x_high * d_y_high)\n\n #THEY AGREE AT THIS POINT\n weighted_1 = 
torch.mul(repeat_weights(w1, desired_shape), res_1)\n weighted_2 = torch.mul(repeat_weights(w2, desired_shape), res_2)\n weighted_3 = torch.mul(repeat_weights(w3, desired_shape), res_3)\n weighted_4 = torch.mul(repeat_weights(w4, desired_shape), res_4)\n\n novel_view = torch.add(torch.add(weighted_1, weighted_2), weighted_3)\n torch.add(novel_view, weighted_4, out=novel_view)\n return novel_view", "def depthImg(imgL, imgR, ndisparities=16, blockSize=16):\n\tstereo = cv2.StereoBM(cv2.STEREO_BM_BASIC_PRESET, ndisparities=ndisparities, SADWindowSize=blockSize)\n\tdisparity = stereo.compute(imgL, imgR)\n\treturn disparity", "def measure_curvature_pixels(ploty, leftx, lefty, rightx, righty):\n\n # Start by generating our fake example data\n # Make sure to feed in your real data instead in your project!\n #ploty, left_fit, right_fit = generate_data()\n \n # Define y-value where we want radius of curvature\n # We'll choose the maximum y-value, corresponding to the bottom of the image\n y_eval = np.max(ploty)\n\n # Fit new polynomials to x,y in world space\n left_fit_cr = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)\n right_fit_cr = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)\n\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])\n right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])\n # Now our radius of curvature is in meters\n\n return left_curverad, right_curverad", "def _load_disparity(image_path, img_height, img_width):\n disp_img = np.array(Image.open(image_path)).astype('float64')\n disp_img = trim_image(disp_img, img_height, img_width)\n disp_img /= 256\n\n return disp_img", "def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = (green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n 
positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary", "def pairing(left, right):\n # same class: 0\n if left[label] == right[label]:\n flag = 0\n # not same: 1\n else:\n flag = 1\n return tf.cast(left[\"image\"], tf.float32) / 255., tf.cast(right[\"image\"], tf.float32) / 255., tf.cast(flag, tf.float32)", "def same_landmark_images(path_1: str, path_2: str) -> float:\n img_1_greyscale = read_image_greyscale(path_1)\n img_2_greyscale = read_image_greyscale(path_2)\n img_1_rgb_separated = np.array([read_image_color(path_1, component) for component in RGB_COMPONENTS])\n img_2_rgb_separated = np.array([read_image_color(path_2, component) for component in RGB_COMPONENTS])\n\n similarity_hog = similarity_two_images_hog(img_1_greyscale, img_2_greyscale)\n similiarities_rgb = np.array([similarity_two_images_color(img_1_rgb_separated[i], img_2_rgb_separated[i])\n for i in range(0, len(RGB_COMPONENTS))])\n similarity_color = np.mean(similiarities_rgb)\n\n similarity_percentage = np.average([similarity_hog, similarity_color], weights=[1.2, 1])\n return float(similarity_percentage)", "def diff_image(images):\n prev_image = cv2.absdiff(images[0], images[1])\n cur_image = cv2.absdiff(images[1], images[2])\n return cv2.bitwise_and(prev_image, cur_image)", "def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) 
\n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources", "def find_patch0(self):\n orig_image = central_area_crop(self.outp1, crop_size=(128, 192, 160))\n array_shape = np.array(orig_image.shape) # (128, 192, 160)\n patch_shape = np.array([self.patch_size] * 3) # (128)\n space = np.array([16] * 2, 
dtype=np.uint8) # (8)\n patch_idx_limit = (array_shape[1:] - patch_shape[1:]) // space # (4, 2)\n # construct an array, then np.argmax()\n patches_array = np.zeros(patch_idx_limit)\n for patch_idx_y in range(patch_idx_limit[0]):\n for patch_idx_x in range(patch_idx_limit[1]):\n patch_idx = np.array([patch_idx_y, patch_idx_x])\n patch_start = space * patch_idx\n patch_end = space * patch_idx + np.array(patch_shape[1:])\n cropped_array = orig_image[:, patch_start[0]:patch_end[0], patch_start[1]:patch_end[1]]\n num_tumor_voxel = (cropped_array > 0).sum()\n\n patches_array[patch_idx_y, patch_idx_x] = num_tumor_voxel\n argsmax = np.argwhere(patches_array == patches_array.max())\n patch_idx = argsmax[np.random.randint(len(argsmax))]\n # best_patch_idx = np.unravel_index(patches_array.argmax(), patches_array.shape)\n\n # convert in coords in the whole image\n orig_shape = np.array([155, 240, 240])\n cur_shape = np.array([128, 192, 160])\n coord_diffs = (orig_shape - cur_shape) // 2\n patch0_START_pt = np.array((0, ) + tuple(patch_idx * space)) + coord_diffs\n return patch0_START_pt", "def match3(img1, img2, coordinates1, coordinates2, PATCH_SIZE, threshold=0.7):\n\n\t#creating patches for all points from img1 and img2\n\tcoord1_patches = [make_patch(coordinate, PATCH_SIZE, img1) for coordinate in coordinates1]\n\tcoord2_patches = [make_patch(coordinate, PATCH_SIZE, img2) for coordinate in coordinates2]\n\n\t# creating a matrix with dissimilarity measures for all pairs\n\tall_matches = np.zeros((len(coordinates1), len(coordinates2)))\n\n\tfor (x, y), _ in np.ndenumerate(all_matches):\n\t\tall_matches[x,y] = count_difference(coord1_patches[x], coord2_patches[y])\n\n\t#looking for best left-to-right and right-to-left matches\n\tmatches = []\n\t#left-to-right\n\tfor i, coord1 in enumerate(coordinates1):\n\t\tbest_ltr_match = np.argmin(all_matches[i, :]) #best left-to-right match for coord1\n\t\tbest_rtl_match = np.argmin(all_matches[:, best_ltr_match]) #best match for a best match\n\t\tif (i == best_rtl_match): #hurray, there is a super match\n\n\t\t\tmatches.append([coord1, coordinates2[best_ltr_match], all_matches[i, best_ltr_match]])\n\t\n\treturn matches", "def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 
1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity", "def _find_thresh(im_1, im_2, a, b, thresh_r=0.0):\n if im_1.dtype not in [np.uint16, np.uint8]:\n incr = (im_1.max() - im_1.min()) / 256.0\n else:\n incr = 1\n\n thresh_max = im_1.max()\n thresh_min = im_1.min()\n thresh = thresh_max\n r = _pearsonr_below_thresh(thresh, im_1, im_2, a, b)\n min_r = r\n min_thresh = thresh\n while thresh > thresh_min and r > thresh_r:\n thresh -= incr\n r = _pearsonr_below_thresh(thresh, im_1, im_2, a, b)\n if min_r > r:\n min_r = r\n min_thresh = thresh\n\n if thresh == thresh_min:\n thresh = min_thresh\n\n return thresh", "def _red_detect_(self, nslice = 0, thresh = 2.0):\n zk_1 = 's_' + format(nslice, '03d')\n zk_2 = 's_' + format(nslice+1, '03d')\n\n zf_1 = self.z_dense[zk_1]\n zf_2 = self.z_dense[zk_2]\n\n # extract the y and x coordinates\n y1 = zf_1[:,0]\n x1 = zf_1[:,1]\n\n y2 = zf_2[:,0]\n x2 = zf_2[:,1]\n\n\n # create a meshgrid\n [YC, YR] = np.meshgrid(y2, y1)\n [XC, XR] = np.meshgrid(x2, x1)\n\n\n dist_block = np.sqrt((YC-YR)**2 + (XC-XR)**2)\n red_pair = np.where(dist_block <= thresh) # find out where the distance between cell i in plane k and cell j in plane k+1 is below the threshold.\n\n ind1 = red_pair[0] # the indices in the first frame\n ind2 = red_pair[1] # the indices in the second frame\n\n\n # select those with markers > 0 and markers < 0\n marker_1 = zf_1[ind1, 3]\n\n\n new_idx = (marker_1 == 0) # select those with zero-markers, which are never counted before. These are new cells. 
marker_1 needs to be updated.\n pool_new = ind1[new_idx] # select the indices in the first frame where new redundancies are detected \n pool_new_cov = ind2[new_idx] # select the indices in the second frame where new redundancies are detected.\n\n\n pool_exist = ind1[~new_idx] # among the detected redundancies, find those already marked.\n pool_exist_cov = ind2[~new_idx] # correspondingly, find those already marked in the adjacent slice\n\n n_new = len(pool_new)\n n_exist = len(pool_exist)\n if self.verbose:\n print(n_new, \"new redundancies, \", n_exist, \"existing redundancies\")\n\n for n_count in np.arange(n_new):\n # build the new keys\n # also, we need to assign each new key an identity number which is unique.\n n_ind1 = pool_new[n_count] # find the indices in the first slice that contains new redundancies\n n_ind2 = pool_new_cov[n_count] # find the indices in the following slice \n pr_number = nslice * 1000 + n_ind1\n pr_key = 'sl_' + str(pr_number) # build a key \n new_sl = Simple_list(nslice) # create a simple list with z_marker = nslice, nslice is the index of the first z-slice \n new_sl.add([nslice, zf_1[n_ind1, 4]])\n new_sl.add([nslice+1, zf_2[n_ind2, 4]])\n zf_1[n_ind1, 3] = pr_number # assign the new pr_number to zf_1\n zf_2[n_ind2, 3] = pr_number # assigne the same new pr_number to zf_2\n\n self.redundancy_pool[pr_key] = new_sl # stored into the redundancy pool\n\n\n for n_count in np.arange(n_exist):\n # search for the existing keys\n n_ind1 = pool_exist[n_count]\n n_ind2 = pool_exist_cov[n_count]\n pr_number = int(zf_1[n_ind1, 3])# catch up the pr_number\n pr_key = 'sl_' + str(pr_number) # this pr_key should already exist in the pool. \n\n self.redundancy_pool[pr_key].add([nslice+1, zf_2[n_ind2, 4]])\n zf_2[n_ind2, 3] = pr_number # update the pr_number in the adjacent slice", "def cal_psnr(im1, im2):\n # assert pixel value range is 0-255 and type is uint8\n mse = ((im1.astype(np.float) - im2.astype(np.float)) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr", "def getStereoGoodPixelPercentage(inputPrefix, workDir=''):\n\n # Set up input folder\n inputFolder = os.path.dirname(inputPrefix)\n if not os.path.exists(inputFolder):\n raise Exception('Input folder ' + inputFolder + ' not found!') \n if workDir == '':\n workDir = inputFolder\n\n \n #TODO: Look for goodPixelMap file!\n \n #TODO: Look for later stage estimates!\n \n # If the later stage files were not found, use the integer correlation file \n \n # Extract the third band of the D_sub.tif image which contains a good pixel map\n inputPath = inputPrefix + '-D_sub.tif'\n if not os.path.exists(inputPath):\n raise Exception('Could not find file ' + inputPath)\n convertedImagePath = os.path.join(workDir, 'goodPixelMap-D_sub.tif')\n cmd = 'gdal_translate -of GTiff -ot BYTE -b 3 ' + inputPath + ' ' + convertedImagePath\n print cmd\n os.system(cmd)\n \n # Determine the percentage of good pixels \n cmd = ['gdalinfo', '-hist', convertedImagePath]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n translateOut, err = p.communicate()\n\n # Parse the gdalinfo output\n bucket = translateOut.find('buckets')\n colon = translateOut.find(':', bucket)\n start = translateOut.find('\\n', colon)\n end = translateOut.find('\\n', start+1)\n buckets = translateOut[start+1:end] # Pick off the string containing the buckets\n numbers = buckets.strip().split(' ')\n \n numBad = int(numbers[0]) # All pixels are in the first (bad) or last (good) buckets\n numGood = int(numbers[-1])\n percentGood = float(numGood) / float(numGood + 
numBad)\n\n return percentGood", "def patch_average_error(self, image_1, image_2, height, width, center_x, center_y):\n size = tf.constant([height, width], dtype=tf.int32)\n offset = tf.constant([[center_x, center_y]], dtype=tf.float32)\n image_1 = tf.constant(image_1, dtype=tf.float32)\n image_2 = tf.constant(image_2, dtype=tf.float32)\n #print(image_1.get_shape().as_list(), image_2.get_shape().as_list())\n patch_1 = tf.image.extract_glimpse(image_1, size, offset, centered=False, normalized=True)\n patch_2 = tf.image.extract_glimpse(image_2, size, offset, centered=False, normalized=True)\n\n shape_1 = patch_1.get_shape().as_list()\n shape_2 = patch_2.get_shape().as_list()\n assert shape_1 == shape_2, (\n 'Patch to compare must have the same shape'\n )\n patch_1 = tf.squeeze(patch_1)\n patch_2 = tf.squeeze(patch_2)\n mean_pixel_error = tf.reduce_mean(tf.sqrt(tf.square(patch_1-patch_2)))\n\n return mean_pixel_error, patch_1, patch_2", "def compute_psnr_and_ssim(image1, image2, border_size=0):\r\n if len(image1.shape) == 2:\r\n image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)\r\n if len(image2.shape) == 2:\r\n image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)\r\n\r\n if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:\r\n return None\r\n\r\n image1 = trim_image_as_file(image1)\r\n image2 = trim_image_as_file(image2)\r\n\r\n if border_size > 0:\r\n image1 = image1[border_size:-border_size, border_size:-border_size, :]\r\n image2 = image2[border_size:-border_size, border_size:-border_size, :]\r\n\r\n psnr = peak_signal_noise_ratio(image1, image2, data_range=255)\r\n ssim = structural_similarity(image1, image2, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03,\r\n sigma=1.5, data_range=255)\r\n return psnr, ssim", "def yolo_pre_filtering(left_img: np.ndarray) -> np.ndarray:\n # Apply the bilateral filter with optimal parameters (described in the report)\n return cv2.bilateralFilter(left_img, 5, 35, 160)", "def compute_derivatives(im1, im2):\n assert im1.shape == im2.shape\n \n Ix = np.empty_like(im1)\n Iy = np.empty_like(im1)\n It = np.empty_like(im1)\n\n #\n # Your code here\n #\n \n # Taken from: Lecture 3 (filtering continued) - Slide 39\n # print(\"Calculating convolutions for derivatives. This might take a while.\")\n # D_x = 1/6 * np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])\n # D_y = 1/6 * np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])\n\n # Vereinfachte Kernel. 
Haben kein smoothing, nur die Ableitung\n D_x = 1/2 * np.array([1, 0, -1]).reshape((1,3))\n D_y = 1/2 * np.array([1, 0, -1]).reshape((3,1))\n\n \n Ix = convolve2d(im1, D_x, mode=\"same\", boundary=\"symm\")\n Iy = convolve2d(im1, D_y, mode=\"same\", boundary=\"symm\")\n It = im2 - im1\n\n # Debugging\n ## print(\"Following prints should all have the same shape: \")\n ## print(\"shape Im: \", im1.shape)\n ## print(\"shape Ix: \", Ix.shape)\n ## print(\"shape Iy: \", Iy.shape)\n ## print(\"shape It: \", It.shape)\n ## print(\"\\n\")\n\n assert Ix.shape == im1.shape and \\\n Iy.shape == im1.shape and \\\n It.shape == im1.shape\n\n return Ix, Iy, It", "def match_obj_hole(post_grasp_pos_patch,\n pre_grasp_pos_patch,\n post_x=None, post_y=None):\n if post_x is None:\n post_x, post_y, _ = find_single_blob_center(post_grasp_pos_patch)\n obj_center_x = int(pre_grasp_pos_patch.shape[1] / 2)\n obj_center_y = int(pre_grasp_pos_patch.shape[0] / 2)\n\n pre_grasp_pos_patch.dtype = np.int8\n post_grasp_pos_patch.dtype = np.int8\n opt_rot, opt_row_trans, opt_col_trans = 0., 0, 0\n\n old_neg_count = 2048\n neg_count = old_neg_count - 2\n\n is_erosion = False\n erosion_count = 0\n obj_patch = pre_grasp_pos_patch.copy()\n\n # Okay so the matching is actually iterative binary search\n while old_neg_count > neg_count:\n # print(old_neg_count, neg_count)\n\n if neg_count < 1:\n # print('ERODE')\n is_erosion = True\n erosion_count += 1\n kernel = np.ones([3, 3])\n post_grasp_pos_patch.dtype = np.uint8\n post_grasp_pos_patch = cv2.erode(post_grasp_pos_patch,\n kernel,\n iterations=1)\n post_grasp_pos_patch.dtype = np.int8\n neg_count = 2048\n\n old_neg_count = neg_count\n row_trans, col_trans, neg_count_1 = get_opt_translate(obj_img=obj_patch,\n back_img=post_grasp_pos_patch,\n back_center_x=post_x,\n back_center_y=post_y,\n obj_center_x=obj_center_x,\n obj_center_y=obj_center_y,\n prev_row_trans=opt_row_trans,\n prev_col_trans=opt_col_trans,\n is_erosion=is_erosion)\n if neg_count_1 < old_neg_count:\n opt_row_trans = row_trans\n opt_col_trans = col_trans\n rot_res, neg_count_2 = get_opt_rotate(obj_img=pre_grasp_pos_patch,\n back_img=post_grasp_pos_patch,\n back_center_x=post_x + opt_col_trans,\n back_center_y=post_y + opt_row_trans,\n obj_center_x=obj_center_x,\n obj_center_y=obj_center_y,\n prev_rot_angle=opt_rot,\n is_erosion=is_erosion)\n if neg_count_2 < neg_count_1:\n opt_rot = rot_res\n neg_count = min(neg_count_1, neg_count_2)\n obj_patch = ndimage.rotate(pre_grasp_pos_patch, opt_rot, reshape=False)\n return is_erosion, erosion_count, int(opt_row_trans), int(opt_col_trans), opt_rot, post_x, post_y", "def diff_image_feature(image0, image1):\n return 0", "def testComputeImage(self):\n for fiberId in self.detMap.fiberId:\n for fraction in (0.1, 0.5, 0.9):\n yy = self.synthConfig.height*fraction\n if yy == int(yy):\n # Ensure we have a non-integer pixel position,\n # so computeImage and computeKernelImage differ\n yy += 0.5\n wavelength = self.detMap.findWavelength(fiberId, yy)\n image = self.psf.computeImage(fiberId, wavelength)\n kernel = self.psf.computeKernelImage(fiberId, wavelength)\n\n # Image should have xy0 set somewhere in the middle of the larger image\n self.assertNotEqual(image.getX0(), 0)\n self.assertNotEqual(image.getY0(), 0)\n\n # Kernel should have xy0 set to the half-size\n halfSize = (self.size - 1)//2\n self.assertEqual(kernel.getX0(), -halfSize)\n self.assertEqual(kernel.getY0(), -halfSize)\n\n # Centroid on image should be at the point of interest\n xx, yy = 
self.detMap.findPoint(fiberId, wavelength)\n centroid = calculateCentroid(image)\n self.assertFloatsAlmostEqual(xx, centroid.x, atol=2.0e-2)\n self.assertFloatsAlmostEqual(yy, centroid.y, atol=2.0e-2)\n\n # Centroid on kernel should be zero\n centroid = calculateCentroid(kernel)\n self.assertFloatsAlmostEqual(centroid.x, 0.0, atol=1.0e-7)\n self.assertFloatsAlmostEqual(centroid.y, 0.0, atol=1.0e-7)", "def sliced_wasserstein_distance(real_images,\n fake_images,\n resolution_min=16,\n patches_per_image=64,\n patch_size=7,\n random_sampling_count=1,\n random_projection_dim=7 * 7 * 3,\n use_svd=False):\n height = real_images.shape[1]\n real_images.shape.assert_is_compatible_with([None, None, height, 3])\n fake_images.shape.assert_is_compatible_with(real_images.shape)\n\n # Select resolutions.\n resolution_full = int(height)\n resolution_min = min(resolution_min, resolution_full)\n resolution_max = resolution_full\n # Base loss of detail.\n resolutions = [\n 2**i\n for i in range(\n int(np.log2(resolution_max)),\n int(np.log2(resolution_min)) - 1, -1)\n ]\n\n # Gather patches for each level of the Laplacian pyramids.\n patches_real, patches_fake, patches_test = (\n [[] for _ in resolutions] for _ in range(3))\n for lod, level in enumerate(\n _laplacian_pyramid(real_images, len(resolutions))):\n patches_real[lod].append(\n _batch_to_patches(level, patches_per_image, patch_size))\n patches_test[lod].append(\n _batch_to_patches(level, patches_per_image, patch_size))\n\n for lod, level in enumerate(\n _laplacian_pyramid(fake_images, len(resolutions))):\n patches_fake[lod].append(\n _batch_to_patches(level, patches_per_image, patch_size))\n\n for lod in range(len(resolutions)):\n for patches in [patches_real, patches_test, patches_fake]:\n patches[lod] = _normalize_patches(patches[lod])\n\n # Evaluate scores.\n scores = []\n for lod in range(len(resolutions)):\n if not use_svd:\n scores.append(\n (_sliced_wasserstein(patches_real[lod], patches_test[lod],\n random_sampling_count, random_projection_dim),\n _sliced_wasserstein(patches_real[lod], patches_fake[lod],\n random_sampling_count, random_projection_dim)))\n else:\n scores.append(\n (_sliced_wasserstein_svd(patches_real[lod], patches_test[lod]),\n _sliced_wasserstein_svd(patches_real[lod], patches_fake[lod])))\n return scores" ]
[ "0.6900295", "0.60511535", "0.59079045", "0.58602035", "0.57603073", "0.5694905", "0.5694412", "0.5591769", "0.55177313", "0.5496024", "0.5428157", "0.54159725", "0.5392759", "0.539026", "0.5331411", "0.5300562", "0.52894914", "0.5281148", "0.5274349", "0.5271155", "0.5266134", "0.5263725", "0.52507913", "0.5239199", "0.5213332", "0.52126193", "0.52050513", "0.51967853", "0.5164409", "0.5156257" ]
0.69326293
0
Calculate the cost volume. Each pixel will have D=max_disparity cost values associated with it. Basically for each pixel, we compute the cost of different disparities and put them all into a tensor.
def calculate_cost_volume( left_img: torch.Tensor, right_img: torch.Tensor, max_disparity: int, sim_measure_function: Callable, block_size: int = 9, ): # placeholders H = left_img.shape[0] W = right_img.shape[1] H_offset = block_size//2 W_offset = block_size//2 cost_volume = torch.ones(H, W, max_disparity) * 255 ########################################################################### # Student code begins ########################################################################### for ii in range(H-2*H_offset): for jj in range(W-2*W_offset): left_patch = left_img[ii:ii+block_size,jj:jj+block_size, :] similarity_error_array = np.ones(max_disparity) for kk in range(max_disparity): if jj - kk >= 0: jj_start = max(jj - kk, 0) jj_end = max(jj - kk + block_size, block_size) right_patch = right_img[ii:ii+block_size, jj_start:jj_end, :] similarity_error_array[kk] = sim_measure_function(left_patch, right_patch) else: similarity_error_array[kk] = 255.0 cost_volume[ii+H_offset,jj+W_offset,:] = torch.tensor(similarity_error_array) ########################################################################### # Student code ends ########################################################################### return cost_volume
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def computeNodeVolumes(self):\n for i in np.arange(0,self.ni):\n for j in np.arange(0,self.nj):\n for k in np.arange(0,self.nk):\n \n V = self.dh[0]*self.dh[1]*self.dh[2]\n if (i==0 or i==self.ni-1): V*=0.5\n if (j==0 or j==self.nj-1): V*=0.5\n if (k==0 or k==self.nk-1): V*=0.5\n \n self.node_vol[i][j][k] = V", "def axon_volume_cost(W, D):\n # Make sure self-weights are set to zero\n np.fill_diagonal(W,0)\n # Calculate cost by summing weights with distances\n return (np.triu(W)*D).sum()", "def compute(self):\n # this just initializes all gradients to the vector (0,0,0)\n self.data = [ZERO_GRADIENT] * (self.volume.dim_x * self.volume.dim_y * self.volume.dim_z)\n\n for i in range(1, self.volume.dim_x-1):\n for j in range(1, self.volume.dim_y-1):\n for k in range(1, self.volume.dim_z-1):\n d_x = 0.5 * (self.volume.get_voxel(i+1, j, k) - self.volume.get_voxel(i-1, j, k))\n d_y = 0.5 * (self.volume.get_voxel(i, j+1, k) - self.volume.get_voxel(i, j-1, k))\n d_z = 0.5 * (self.volume.get_voxel(i, j, k+1) - self.volume.get_voxel(i, j, k-1))\n self.set_gradient(i, j, k, VoxelGradient(d_x, d_y, d_z))", "def cost(self) -> float:", "def local_soft_argmin(cost_volume, sigma):\n # type: (torch.Tensor, int) -> torch.Tensor\n if cost_volume.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(cost_volume.dim()))\n\n if not isinstance(sigma, int):\n raise TypeError('argument \\'sigma\\' must be int, not {}'.format(type(sigma)))\n\n # grab max disparity\n max_disp = cost_volume.shape[1]\n N = cost_volume.size()[0]\n H = cost_volume.size()[2]\n W = cost_volume.size()[3]\n\n # d':|d'-d|<=sigma, d' = argmax( P(d) for d in 1:maxDisp ), (BatchSize, 1, Height, Width)\n index = torch.argmax(cost_volume, dim=1, keepdim=True)\n interval = torch.linspace(-sigma, sigma, 2 * sigma + 1).type_as(index).to(cost_volume.device)\n interval = interval.repeat(N, H, W, 1).permute(0, 3, 1, 2).contiguous()\n # (BatchSize, 2*sigma+1, Height, Width)\n index_group = (index + interval)\n\n # get mask in [0, max_disp)\n mask = ((index_group >= 0) & (index_group < max_disp)).detach().type_as(cost_volume)\n index_group = index_group.clamp(0, max_disp - 1)\n\n # gather values in the index_group\n disp_map = torch.gather(cost_volume, dim=1, index=index_group)\n\n # convert index_group from torch.LongTensor to torch.FloatTensor\n index_group = index_group.type_as(cost_volume)\n\n # d * P(d), and mask out index out of [0, max_disp), (BatchSize, 1, Height, Width)\n # if index in [0, max_disp), keep the original disparity value, otherwise -10000.0, as e(-10000.0) approximate 0.0\n disp_map = F.softmax((disp_map * mask + (1 - mask) * (-10000.0)), dim=1)\n disp_map = (disp_map * index_group).sum(dim=1, keepdim=True)\n\n return disp_map", "def variable_costs(dh: DataHandler):\n print(\"PtHydrogen not implemented\")\n\n scen_hor_map = dh.scenarios.horizon\n\n cost_var = dh.get(\"i_cost\").xs(\"varcost\", level=\"par_cost\")\n cost_var = cost_var.groupby([\"alltec\"]).apply(\n extract_horizon_specific_cost, scen_hor_map\n )\n cost_var = add_dimension(cost_var, dh.merge_stored_sets(\"r\"), \"r\")\n cost_var = cost_var.reorder_levels([\"alltec\", \"r\"])\n\n h2_price = dh.get(\"o_h2price_buy\")\n h2_price = add_dimension(h2_price, dh.merge_stored_sets(\"tec_h2g\"), \"alltec\")\n\n elec_price = dh.get(\"o_prices\")\n\n cost_fuel = dh.get(\"cost_fuel\")\n cost_fuel = add_dimension(cost_fuel, dh.merge_stored_sets(\"r\"), \"r\")\n cost_fuel = cost_fuel.reorder_levels([\"alltec\", \"r\"])\n\n cost_fuel.loc[h2_price.index, :] = 
h2_price\n\n eff = dh.get(\"eff\")\n\n co2_int = dh.get(\"co2_int\").div(1000)\n\n co2_price = dh.get(\"o_co2price\")\n\n co2_costs = co2_int * co2_price\n co2_costs.index.names = [\"alltec\", \"r\"]\n\n var_cost = (\n cost_fuel.add(co2_costs, fill_value=0).div(eff).add(cost_var, fill_value=0)\n )\n\n return var_cost", "def total_cost(self):\n return np.einsum('i->', self.c[self.s])", "def cost(self):\n\t\treturn self.g + self.h", "def _get_cost(self):\n logging.info(\"Cost: {}\".format(self.cost_function.name))\n\n with tf.name_scope(\"cost\"):\n\n if self.cost_function == Cost.BATCH_DICE_LOG or self.cost_function == Cost.BATCH_DICE_SOFT or \\\n self.cost_function == Cost.BATCH_DICE_SOFT_CE:\n # calculate Dice loss over the complete batch (take batch as pseudo 3d Tensor)\n if self._n_class == 1:\n # if nr classes is 1 axis 3 has only one component\n axis = (0, 1, 2, 3)\n else:\n axis = (0, 1, 2)\n else:\n # compute dice for each slice and take average (normally not used but considered as option)\n if self._n_class == 1:\n axis = (1, 2, 3)\n else:\n axis = (1, 2)\n # flatten input and outpout\n flat_logits = tf.reshape(self.logits, [-1, self._n_class])\n flat_labels = tf.reshape(self.y, [-1, self._n_class])\n\n # cross entropy loss\n if self.cost_function == Cost.CROSS_ENTROPY:\n # if class weights are None cross entropy will not be weighted\n loss = tfu.get_cross_entropy(logits=flat_logits, y=flat_labels, n_class=self._n_class,\n weights=self._class_weights_ce)\n # Dice loss\n elif self.cost_function == Cost.DICE_SOFT or self.cost_function == Cost.BATCH_DICE_SOFT:\n loss = 1.0 - tfu.get_dice_loss(logits=self.logits, y=self.y, axis=axis,\n weights=self._class_weights_dice, exclude_zero_label=False)\n # Weighted combination of dice and cross entropy\n elif self.cost_function == Cost.DICE_SOFT_CE or self.cost_function == Cost.BATCH_DICE_SOFT_CE:\n loss = self._loss_weight * (1.0 - tfu.get_dice_loss(logits=self.logits, y=self.y, axis=axis,\n weights=self._class_weights_dice,\n exclude_zero_label=False))\n loss += (1.0 - self._loss_weight) * tfu.get_cross_entropy(logits=flat_logits, y=flat_labels,\n n_class=self._n_class,\n weights=self._class_weights_ce)\n # Dice log loss (-log(dice_score)). Considered to have nicer gradient.\n # But seems to be not realy more valuable in real life\n elif self.cost_function == Cost.DICE_LOG or self.cost_function == Cost.BATCH_DICE_LOG:\n loss = tfu.get_dice_log_loss(self.logits, self.y, axis=axis, exclude_zero_label=False)\n\n # MSE loss used for regression tasks\n elif self.cost_function == Cost.MSE:\n loss = tf.losses.mean_squared_error(flat_logits, flat_labels)\n\n # TV loss (MSE + total variation of output as regularizer). 
Seems to not work very\n elif self.cost_function == Cost.TV:\n loss = tf.losses.mean_squared_error(flat_logits, flat_labels)\n tv = tf.reduce_sum(tf.image.total_variation(self.logits))\n loss += self._tv_regularizer * tv\n else:\n raise ValueError(\"Unknown cost function: \" % self.cost_function.name)\n\n # if value for l1 or l2 regularizer is given add them to the loss\n if self._l2_regularizer is not None:\n self.l2regularizers = self._l2_regularizer * sum(\n [tf.nn.l2_loss(variable) for variable in self.variables])\n loss += self.l2regularizers\n if self._l1_regularizer is not None:\n self.l1regularizers = self._l1_regularizer * sum([\n tf.reduce_sum(tf.abs(variable)) for variable in self.variables])\n loss += self.l1regularizers\n\n return loss", "def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size", "def compute_cost(self, del_u : list, u : list):\n print(\"ym: \", self.ym, \"yn: \", self.yn)\n self.cost = 0.0\n\n self.ym = self.d_model.ym\n self.yn = self.d_model.yn\n\n # FIXME : this is supposed to be from N1 to N2\n self.cost+= (self.ym[0] - self.yn[0])\n angle_diff = (self.ym[1] - self.yn[1])\n if angle_diff > np.pi:\n angle_diff -= 2*np.pi\n if angle_diff < -np.pi:\n angle_diff += 2*np.pi\n self.cost += angle_diff\n\n for j in range(self.Nu):\n self.cost += (self.ym[j] - self.yn[j])**2\n\n for j in range(self.Nu):\n self.cost += self.lambd[j]*(del_u[j])**2\n\n for j in range(self.Nu):\n self.cost += self.s / (u[j] + self.r / 2.0 - self.b) + self.s / (self.r/2.0 + self.b - u[j]) - 4.0 / self.r\n\n return self.cost", "def compute_cost(im1, im2):\n assert im1.shape == im2.shape\n\n d = 0.0\n #\n # Your code here\n #\n\n # The minimized cost function should be SSD. 
Slide 36\n # Code taken from assignment 4\n d = np.sum((im2 - im1)**2)\n\n assert isinstance(d, float)\n return d", "def get_cost_updates(self):\n\n y = self.get_hidden_values()\n z = self.get_reconstructed_input(y)\n\n L = T.sum((self.x-z)**2, axis=1)\n\n cost = T.mean(L)\n\n return cost", "def cost(self, output, labels, weights):\n return tf.multiply(0.5 * tf.square(output - labels), weights)", "def getCost(dat, rateBlocks, key=\"Lintel\"):\n\n x = dat[:]\n\n if key == \"Lintel\":\n edges = [s*100 for s in [5, 6, 7, 10,\n 11, 12, 15, 16, 17, 20, 21, 22, 25]]\n else:\n edges = [s*100 for s in [10, 11, 12, 15, 16, 17, 20,\n 21, 22, 25, 26, 27, 30, 31, 32, 35, 36, 37, 40]]\n for i in edges:\n if i >= x[2]:\n x[2] = i\n break\n\n vol = x[0]*600*x[2]/float(1000000000)\n return vol*rateBlocks # *x[3]", "def get_reconstruction_cost(self, nv):\n cross_entropy = T.mean(\n T.sum(\n self.input * T.log(nv) +\n (1 - self.input) * T.log(1 - nv),\n axis=1\n )\n )\n return cross_entropy", "def cost(self, boards, labels):\n return self._cost(boards, labels, volatile=True).data.numpy()", "def calc_cost(self):\n cost = 0\n for i,[source, sinks] in enumerate(self.nets):\n self.costs[i] = self.calc_half_perimeter(source, sinks)\n cost += self.costs[i]\n self.cost = cost\n return True", "def get_reconstruction_cost(self, updates, pre_sigmoid_nv):\r\n\r\n cross_entropy = T.mean(\r\n T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +\r\n (1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),\r\n axis=1))\r\n\r\n return cross_entropy", "def DCLoss(img, opt):\n maxpool = nn.MaxPool3d((3, opt.patch_size, opt.patch_size), stride=1, padding=(0, opt.patch_size//2, opt.patch_size//2))\n dc = maxpool(1-img[:, None, :, :, :])\n \n target = torch.FloatTensor(dc.shape).zero_().cuda(opt.gpu_ids[0])\n \n loss = L1Loss(reduction='sum')(dc, target)\n return -loss", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not 
supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def compute_dsc(estimated_tract, true_tract):\r\n aff=np.array([[-1.25, 0, 0, 90],[0, 1.25, 0, -126],[0, 0, 1.25, -72],[0, 0, 0, 1]])\r\n #aff=utils.affine_for_trackvis(voxel_size=np.array([1.25,1.25,1.25]))\r\n voxel_list_estimated_tract = streamline_mapping(estimated_tract, affine=aff).keys()\r\n voxel_list_true_tract = streamline_mapping(true_tract, affine=aff).keys()\r\n TP = len(set(voxel_list_estimated_tract).intersection(set(voxel_list_true_tract)))\r\n vol_A = len(set(voxel_list_estimated_tract))\r\n vol_B = len(set(voxel_list_true_tract))\r\n DSC = 2.0 * float(TP) / float(vol_A + vol_B)\r\n return DSC", "def compute(self) -> Tensor:\n\n if self.samples:\n return self.average_precisions.float() / self.total\n else:\n # pred_image_indices = torch.cat(self.pred_image_indices, dim=0)\n pred_probs = torch.cat(self.pred_probs, dim=0)\n pred_labels = torch.cat(self.pred_labels, dim=0)\n pred_bboxes = torch.cat(self.pred_bboxes, dim=0)\n\n # target_image_indices = torch.cat(self.target_image_indices, dim=0)\n target_labels = torch.cat(self.target_labels, dim=0)\n target_bboxes = torch.cat(self.target_bboxes, dim=0)\n\n # pred_index = torch.nonzero((pred_labels == 1))\n # pred_probs = pred_probs[pred_index]\n # pred_bboxes = pred_bboxes[pred_index]\n # target_index = torch.nonzero((target_labels == 1))\n # target_bboxes = target_bboxes[target_index]\n\n\n # _, index_sorted = torch.sort(pred_probs)\n # pred_bboxes = pred_bboxes[index_sorted].cpu().detach().numpy()\n # target_bboxes = target_bboxes.cpu().detach().numpy()\n pred_probs = pred_probs.cpu().detach().numpy()\n pred_labels = pred_labels.cpu().detach().numpy()\n pred_bboxes = pred_bboxes.cpu().detach().numpy()\n target_labels = target_labels.cpu().detach().numpy()\n target_bboxes = target_bboxes.cpu().detach().numpy()\n\n pred_probs = pred_probs[pred_labels == 1]\n pred_bboxes = pred_bboxes[pred_labels == 1]\n target_bboxes = target_bboxes[target_labels == 1]\n\n preds_sorted_idx = np.argsort(pred_probs)[::-1]\n pred_bboxes = pred_bboxes[preds_sorted_idx]\n\n x, y = calculate_precision_recall(target_bboxes, pred_bboxes)\n\n if len(x) >= 2:\n return auc(x, y)\n else:\n return 0\n\n # return mean_average_precision(\n # pred_image_indices,\n # pred_probs,\n # pred_labels,\n # pred_bboxes,\n # target_image_indices,\n # target_labels,\n # target_bboxes,\n # self.iou_threshold,\n # self.ap_calculation,\n # )", "def fixed_cost(self):\n return np.einsum('i->', self.c[self.f])", "def calc_cost(self):\n \n correct_pred = tf.equal(self.predictions, tf.argmax(self.y,1))\n batchaccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return self.cost, batchaccuracy, self.predictions", "def calculate_costs(self):\n cost_matrix = self.make_cost_matrix()\n \n if self.greedy:\n # Riesen et al., \"Greedy Graph Edit Distance\"\n costs = []\n psi = []\n \n for row in range(self.N):\n phi = self.M\n row_min = sys.maxint\n for column in range(self.N+self.M):\n if column not in 
psi:\n if cost_matrix[row, column] < row_min:\n row_min = cost_matrix[row, column]\n phi = column\n \n costs.append(row_min)\n if phi < self.M:\n psi.append(phi)\n \n for row in range(self.N, self.N+self.M):\n if (row - self.N) not in psi:\n costs.append(cost_matrix[row, row - self.N])\n else:\n # Riesen & Bunke, \"Approximate graph edit distance computation by means of bipartite graph matching\"\n row_ind, col_ind = optimize.linear_sum_assignment(cost_matrix)\n \n if self.verbose:\n for row, column in (row_ind, col_ind):\n value = cost_matrix[row, column]\n print '%d, %d, %.4f' % (row, column, value)\n \n return row_ind, col_ind, cost_matrix[row_ind, col_ind]", "def compute_cost(Z6, Y):\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z6, labels=Y))\n\n return cost", "def cost(self, state: Grid2D.State): # pylint: disable=no-self-use\n return 1", "def _dcg(target: Tensor) ->Tensor:\n denom = torch.log2(torch.arange(target.shape[-1], device=target.device) + 2.0)\n return (target / denom).sum(dim=-1)", "def volume(nodes, graph):\n ###TODO\n pass" ]
[ "0.5892088", "0.5845971", "0.5578201", "0.5558536", "0.54403883", "0.542479", "0.5368592", "0.5283514", "0.5274385", "0.5272654", "0.52501583", "0.5215531", "0.5213679", "0.51815045", "0.5129831", "0.5067275", "0.5061512", "0.50612044", "0.50498176", "0.5039792", "0.50319874", "0.5028921", "0.50277007", "0.50183135", "0.49972108", "0.49858943", "0.49671596", "0.49528718", "0.49519718", "0.49495083" ]
0.6094134
0
Verify the init calls the ResponseRetriever and assigns it to a variable.
def test_init_creates_retriever(self, mock_retriever): mediator = GenericMediator() with self.subTest(): mock_retriever.assert_called_once_with() self.assertIsNotNone(mediator.retriever)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init_creates_retriever(self, mock_retriever):\n\n mediator = AuthenticationMediator()\n with self.subTest():\n mock_retriever.assert_called_once_with()\n self.assertIsNotNone(mediator.retriever)", "def test_init_creates_retriever(self, mock_retriever):\n\n mediator = TimeSeriesMediator()\n with self.subTest():\n mock_retriever.assert_called_once_with()\n self.assertIsNotNone(mediator.retriever)", "def test__init__(self):\n mocked_reconstructor = Mock()\n mocked_reconstructor.reconstruct.return_value = 'mocked'\n db_response = {'Item': {'test': True}}\n resp = GetResponse(db_response, mocked_reconstructor)\n assert resp.item == 'mocked'\n mocked_reconstructor.reconstruct.assert_called_with(db_response['Item'])", "def testInitializer(self):\n request = http.HttpRequest()\n\n data, check, mutator = initialize.MELANGE_INITIALIZER.initialize(\n request, [], {})\n self.assertEqual(request, data.request)\n self.assertEqual(data, check.data)\n self.assertEqual(data, mutator.data)", "def test_set_data(self, mock_retriever):\n\n mock_retriever.get_response_json.return_value = SAMPLE_RESPONSE\n mediator = AuthenticationMediator(**NO_METRIC_REQUEST_PARAMS)\n mediator.get_results()\n\n with self.subTest():\n mock_retriever.assert_called_once()", "def setup_response_collector(self):\n pass", "def test_init_creates_retriever(self, mock_retriever):\n\n mediator = SessionDetailMediator(**DEFAULT_SESSION_DETAIL_PARAMS)\n with self.subTest():\n mock_retriever.assert_called_once_with(**DEFAULT_SESSION_DETAIL_PARAMS)\n with self.subTest():\n self.assertIsNotNone(mediator.retriever)", "def __init__(self, get_response):\n self.get_response = get_response\n\n self.filter = getattr(settings, \"IPINFO_FILTER\", self.is_bot)\n\n ipinfo_token = getattr(settings, \"IPINFO_TOKEN\", None)\n ipinfo_settings = getattr(settings, \"IPINFO_SETTINGS\", {})\n self.ip_selector = getattr(\n settings, \"IPINFO_IP_SELECTOR\", DefaultIPSelector()\n )\n self.ipinfo = ipinfo.getHandlerAsync(ipinfo_token, **ipinfo_settings)", "def test_init_method_with_base_response(self):\n response = Response(\"https://scrapy.org\")\n l = TestItemLoader(response=response)\n self.assertIs(l.selector, None)", "def __init__(self, get_response):\n if not settings.PRODUCTION_ENVIRONMENT and not settings.TESTING:\n self.get_response = get_response\n else:\n raise MiddlewareNotUsed()", "def __init__(self):\n\n super().__init__()\n\n self.__current_request_mock = None", "def test__init__keyerror(self):\n mocked_reconstructor = Mock()\n db_response = {}\n resp = GetResponse(db_response, mocked_reconstructor)\n assert resp.item == None", "def __init__(self, res):\n self.fromResponseObj(res)", "def __init__(self, res):\n self.fromResponseObj(res)", "def _init_(self):\n self.res = {}", "def get_init_response():\n speechOutput = GET_INIT_MESSAGE \n\n return response(speech_response_ssml(speechOutput, False))", "def __init__(self, request: Request, response: Response):\n self.request = request\n self.response = response", "def __init__(self, response):\n self.response = response\n self.object = response['object']\n self.webhook_endpoint_id = response['webhook_endpoint_id']\n self.created_at = response['created_at']\n self.updated_at = response['updated_at']\n self.status = response['status']\n self.url = response['url']\n self.events = response['events']\n self.livemode = response['livemode']\n self.secret = response['secret']", "def __init__(self):\n self._init_site_specifications_()\n\n self.my_params = None # parameters for site requests\n 
self.rates = None # exchange rates from the site\n self.timeout = 1 # url response timeout in seconds\n\n # retrieved rates validity\n self.valid_from_utc = None\n self.valid_to_utc = None\n\n self.in_ccode = None\n self.response_success = False", "def __init__(self):\n self.setup_called = False", "def test_init(self):\n\n class TestResource(BaseResource):\n\n name = 'test_resource'\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n route = '/a_route'\n TestResource.init(api, route)\n\n # validate the attribute values of the class\n self.assertEqual(api, TestResource.api)\n self.assertEqual(route, TestResource.route)\n self.assertEqual(api.mongodb, TestResource.mongodb)\n self.assertEqual(api.conf, TestResource.conf)\n self.assertEqual('http://an_endpoint/a_route', TestResource.endpoint)\n self.assertEqual('test_resource', TestResource.logger.name)", "def request_initialization(self) -> global___Snippet.SimpleRequestInitialization:", "def request_initialization(self) -> global___Snippet.SimpleRequestInitialization:", "def request_initialization(self) -> global___Snippet.SimpleRequestInitialization:", "def request_initialization(self) -> global___Snippet.SimpleRequestInitialization:", "def test_setup(self):\n assert self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def test01_create_and_initialize(self):\n h = mockedLDPHandler()\n self.assertTrue(h)\n self.assertEqual(h._request_links, None)\n self.assertTrue(isinstance(h.response_links, ResponseLinks))", "def test_init_param(self):\n # Set a new version\n version = \"v3\"\n api_url = self.get_api_url(api_version=version)\n\n # Setup the mocked response\n responses.add(responses.GET, api_url, json=self.valid_response,\n status=200, match_querystring=False)\n\n acme = ACMEAccount(client=self.client, api_version=version)\n data = acme.all(self.org_id)\n\n # Verify all the query information\n # There should only be one call the first time \"all\" is called.\n # Due to pagination, this is only guaranteed as long as the number of\n # entries returned is less than the page size\n self.assertEqual(len(responses.calls), 1)\n self.match_url_with_qs(responses.calls[0].request.url, api_url=api_url)\n self.assertEqual(data, self.valid_response)", "def test_init(self):\n self.view.__init__()\n self.assertIsInstance(self.view.questionnaire, Questionnaire)\n self.assertEqual(self.view.questionnaire, self.questionnaire)", "def setUp(self):\n self.response = self.s.get(self.url, params=self.params)" ]
[ "0.6763478", "0.650543", "0.6442508", "0.6405172", "0.6388726", "0.6354988", "0.6319423", "0.62481976", "0.6236849", "0.62093467", "0.6205554", "0.62045234", "0.61486846", "0.61486846", "0.6147114", "0.60797095", "0.6067892", "0.6054257", "0.60442466", "0.60230684", "0.59969366", "0.5986984", "0.5986984", "0.5986984", "0.5986984", "0.5906506", "0.58918834", "0.5876022", "0.5862522", "0.5848097" ]
0.6619355
1
Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context).
def _get_lines_from_file(filename, lineno, context_lines):
    try:
        source = open(filename).readlines()
        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines
        pre_context = \
            [line.strip('\n') for line in source[lower_bound:lineno]]
        context_line = source[lineno].strip('\n')
        post_context = \
            [line.strip('\n') for line in source[lineno + 1:upper_bound]]
        return lower_bound, pre_context, context_line, post_context
    except (OSError, IOError):
        return None, [], None, []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):\n source = None\n if loader is not None and hasattr(loader, \"get_source\"):\n with suppress(ImportError):\n source = loader.get_source(module_name)\n if source is not None:\n source = source.splitlines()\n if source is None:\n with suppress(OSError, IOError):\n with open(filename, \"rb\") as fp:\n source = fp.read().splitlines()\n if source is None:\n return None, [], None, []\n try:\n # If we just read the source from a file, or if the loader did not\n # apply tokenize.detect_encoding to decode the source into a Unicode\n # string, then we should do that ourselves.\n if isinstance(source[0], bytes):\n encoding = \"ascii\"\n for line in source[:2]:\n # File coding may be specified. Match pattern from PEP-263\n # (http://www.python.org/dev/peps/pep-0263/)\n match = re.search(br\"coding[:=]\\s*([-\\w.]+)\", line)\n if match:\n encoding = match.group(1).decode(\"ascii\")\n break\n source = [str(sline, encoding, \"replace\") for sline in source]\n\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = source[lower_bound:lineno]\n context_line = source[lineno]\n post_context = source[lineno + 1 : upper_bound]\n\n return lower_bound, pre_context, context_line, post_context\n except Exception as e:\n try:\n context_line = f'<There was an error displaying the source file: \"{repr(e)}\" The loaded source has {len(source)} lines.>'\n except Exception:\n context_line = \"<There was an error displaying the source file. Further, there was an error displaying that error>\"\n return lineno, [], context_line, []", "def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):\n source = None\n if loader is not None and hasattr(loader, \"get_source\"):\n source = loader.get_source(module_name)\n if source is not None:\n source = source.splitlines()\n if source is None:\n try:\n f = open(filename)\n try:\n source = f.readlines()\n finally:\n f.close()\n except (OSError, IOError):\n pass\n if source is None:\n return None, [], None, []\n\n encoding = 'ascii'\n for line in source[:2]:\n # File coding may be specified. 
Match pattern from PEP-263\n # (http://www.python.org/dev/peps/pep-0263/)\n match = re.search(r'coding[:=]\\s*([-\\w.]+)', line)\n if match:\n encoding = match.group(1)\n break\n source = [unicode(sline, encoding, 'replace') for sline in source]\n\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = [line.strip('\\n') for line in source[lower_bound:lineno]]\n context_line = source[lineno].strip('\\n')\n post_context = [line.strip('\\n') for line in source[lineno+1:upper_bound]]\n\n return lower_bound, pre_context, context_line, post_context", "def get_source_lines(self, filename, lineno, context=0):\n if not filename or not lineno:\n return ''\n\n return ''.join([' ' + linecache.getline(filename, line) for line in range(lineno - context, lineno + context + 1)])", "def line_offsets(fname):\n line_offset = []\n offset = 0\n for _, line in enumerate( open(fname) ):\n line_offset.append(offset)\n offset += len(line)\n return line_offset", "def _diffContext(diff, n=3):\n nlines = len(diff)\n clines = set() # set of lines to include\n for i, line in enumerate(diff):\n if line[0] != ' ':\n clines |= set(range(max(0, i-n), min(i+n+1, nlines)))\n context = []\n clines = list(clines)\n clines.sort()\n last = -1\n for i in clines:\n if i != last+1:\n context.append(\" ...\\n\")\n context.append((\"%4d: \"%i) + diff[i])\n last = i\n if clines[-1] != nlines-1:\n context.append(\" ...\\n\")\n return context", "def line_range(self) -> Tuple[int, int]:\n if self._line_range is None:\n node_extent = self.node.extent\n comment_extent = self.node.comment_extent\n if comment_extent.start.file is None:\n comment_extent = node_extent\n\n self._line_range = (\n min(node_extent.start.line, comment_extent.start.line),\n max(node_extent.end.line, comment_extent.end.line),\n )\n\n return self._line_range", "def findlinestarts(code):\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n result = []\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n result.append((addr, lineno))\n lastlineno = lineno\n addr += byte_incr\n lineno += line_incr\n if lineno != lastlineno:\n result.append((addr, lineno))\n return result", "def outerLineno2():\n cf = inspect.currentframe()\n return cf.f_back.f_back.f_back.f_lineno", "def getlineno(frame):\r\n # FrameType.f_lineno is now a descriptor that grovels co_lnotab\r\n return frame.f_lineno", "def get_linepos(self, pos):\n lnum, cnum = self._get_linepos(pos)\n return lnum + self.LINE_NUM_BASE, cnum", "def linenum(self):\n return self.source_frame_stack.linenum()", "def findlinestarts(code):\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n yield (addr, lineno)\n lastlineno = lineno\n addr += byte_incr\n if line_incr >= 0x80:\n # line_increments is an array of 8-bit signed integers\n line_incr -= 0x100\n lineno += line_incr\n if lineno != lastlineno:\n yield (addr, lineno)", "def lineno():\n linenum = inspect.currentframe().f_back.f_lineno\n frameinfo = inspect.getframeinfo(inspect.currentframe())\n filename = frameinfo.filename\n return str(\"File: \" + str(filename) + \" Line: \" + str(linenum))", "def 
lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def currentLineno():\n cf = inspect.currentframe()\n return cf.f_back.f_lineno", "def get_lineno(self):\n return self.lexer.get_lineno()", "def lineno():\n\treturn inspect.currentframe().f_back.f_lineno", "def lineno():\r\n\treturn inspect.currentframe().f_back.f_lineno", "def GetLineno():\n return inspect.currentframe().f_back.f_lineno", "def _get_linepos(self, pos):\n t = self.input\n if pos < 0 or pos > len(t):\n raise IndexError(\"position %d not in 0..%d\" % (pos, len(t)))\n\n lpc = self.__linepos\n\n # Locate the smallest known line index whose end is at or after p.\n def locate(p):\n self._update_linetab(p)\n lo = 0\n hi = len(lpc) - 1\n if lpc[hi] < p:\n return hi\n\n # Invariant: lpc[lo] < p; lpc[hi] >= p\n while lo + 1 < hi:\n mid = (lo + hi) // 2\n if lpc[mid] > p: hi = mid\n elif lpc[mid] < p: lo = mid\n else: return mid - 1\n return hi - 1\n\n lnum = locate(pos)\n start, end = self._get_linespan(lnum)\n cnum = pos - start\n return lnum, cnum", "def extract_lines(infile):\n with open(infile, 'r') as src:\n return read_on(get_line, src)", "def get_frame_info(tb, context_lines=7):\n # line numbers / function / variables\n lineno = tb.tb_lineno\n function = tb.tb_frame.f_code.co_name\n variables = tb.tb_frame.f_locals\n\n # get filename\n fn = tb.tb_frame.f_globals.get('__file__')\n if not fn:\n fn = _os.path.realpath(\n _inspect.getsourcefile(tb) or _inspect.getfile(tb)\n )\n if fn[-4:] in ('.pyc', '.pyo'):\n fn = fn[:-1]\n\n # module name\n modname = tb.tb_frame.f_globals.get('__name__')\n\n # get loader\n loader = tb.tb_frame.f_globals.get('__loader__')\n\n # sourcecode\n try:\n if not loader is None:\n source = loader.get_source(modname)\n else:\n source = file(fn).read()\n except (SystemExit, KeyboardInterrupt):\n raise\n except:\n source = ''\n pre_context, post_context = [], []\n context_line, context_lineno = None, None\n else:\n parser = PythonParser(source)\n parser.parse()\n parsed_source = parser.get_html_output()\n lbound = max(0, lineno - context_lines - 1)\n ubound = lineno + context_lines\n try:\n context_line = parsed_source[lineno - 1]\n pre_context = parsed_source[lbound:lineno - 1]\n post_context = parsed_source[lineno:ubound]\n except IndexError:\n context_line = None\n pre_context = post_context = [], []\n context_lineno = lbound\n\n return {\n 'tb': tb,\n 'filename': fn,\n 'loader': loader,\n 'function': function,\n 'lineno': lineno,\n 'vars': variables,\n 'pre_context': pre_context,\n 'context_line': context_line,\n 'post_context': post_context,\n 'context_lineno': context_lineno,\n 'source': source\n }", "def lineno():\n\n return inspect.currentframe().f_back.f_lineno", "def get_lines_in_file(config_file):\n lines = []\n\n line = config_file.readline()\n lines.append([1, line])\n\n line_counter = 1\n while line:\n line = config_file.readline()\n if not (line.lstrip().startswith(\"#\")):\n lines.append([line_counter, line])\n\n line_counter += 1\n\n return lines", "def outerLineno():\n cf = inspect.currentframe()\n return cf.f_back.f_back.f_lineno" ]
[ "0.68238574", "0.66814256", "0.6353968", "0.60285145", "0.56442934", "0.55935204", "0.5548546", "0.55324066", "0.5492608", "0.5492226", "0.5484061", "0.5471936", "0.5433974", "0.54219884", "0.54219884", "0.54219884", "0.54219884", "0.54219884", "0.54219884", "0.5399224", "0.5389617", "0.5383173", "0.5371696", "0.53687406", "0.533369", "0.53265804", "0.5317886", "0.5297474", "0.52922", "0.5284879" ]
0.75511456
0
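For orientation, a minimal sketch of how a helper like the `_get_lines_from_file` document above might be called when rendering an error page; the file path and line number below are illustrative assumptions, not values taken from this dataset.

# Hypothetical call site: print three lines of context around a reported error.
# "app/views.py" and lineno are made-up example values.
lineno = 41  # zero-based, matching source[lineno] in the function above
lower, pre, line, post = _get_lines_from_file("app/views.py", lineno,
                                               context_lines=3)
if line is not None:
    for offset, text in enumerate(pre + [line] + post):
        marker = "->" if lower + offset == lineno else "  "
        print(f"{marker} {lower + offset + 1}: {text}")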
Counts the number of mines in the neighboring cells
def count_neighbor_mines(self, i, j):
    n_neighbor_mines = -1
    if not self.mines[i, j]:
        n_neighbor_mines = np.count_nonzero(
            self.mines[(i-1 if i > 0 else 0):i+2,
                       (j-1 if j > 0 else 0):j+2])
    return n_neighbor_mines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_neighbor_mines(self, x, y):\n\t\treturn sum(self.mines[n][m] for (n, m) in self.get_valid_neighbors(x, y))", "def count_mines(row, col):\r\n total = 0\r\n for r,c in ((-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)):\r\n try:\r\n if mines[row+r][col+c] == 1:\r\n total += 1\r\n except KeyError:\r\n pass\r\n return total", "def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines", "def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines", "def nearby_mines(self, cell):\n\n # Keep count of nearby mines\n count = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n count += 1\n return count", "def nearby_mines(self, cell):\n\n # Keep count of nearby mines\n count = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n count += 1\n\n return count", "def nearby_mines(self, cell):\n\n # Keep count of nearby mines\n count = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n count += 1\n\n return count", "def nearby_mines(self, cell):\n\n # Keep count of nearby mines\n count = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n count += 1\n\n return count", "def nearby_mines(self, cell):\n\n # Keep count of nearby mines\n count = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n count += 1\n\n return count", "def nearby_mines(self, cell):\n\n # Keep count of nearby mines\n count = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update 
count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n count += 1\n\n return count", "def nearby_mines(self, cell):\n\n # Keep count of nearby mines\n count = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n count += 1\n\n return count", "def get_num_mines_around_position(self, x, y):\n mines = 0\n for row in range(y-1, y+2):\n for col in range(x-1, x+2):\n if row >= 0 and col >= 0 and row < len(self.mine_map) and col < len(self.mine_map[row]): # Don't check spaces that are outside of the array\n if self.mine_map[row][col]:\n mines += 1\n return mines", "def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def num_cells(self):\n if hasattr(self, '__num_cells__'):\n return self.__num_cells__\n if self.x is not None:\n return self.x.size(self.__cat_dim__('x', self.x))\n if self.boundary_index is not None:\n return int(self.boundary_index[1,:].max()) + 1\n assert self.upper_index is None and self.lower_index is None\n return None", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def get_number_neighbours_of_cell(self, x_cell, y_cell):\n alive_neighbours = 0\n \n # neighbour indices\n x_indices = [x_cell-1, x_cell, x_cell+1]\n y_indices = [y_cell-1, y_cell, y_cell+1]\n\n\n #TODO: use functional programming ^^^^^^\n #x_indices = list(filter(lambda x: x < 0 and x > self.size[0], x_indices))\n #y_indices = list(filter(lambda y: y < 0 and y > self.size[1], y_indices))\n \n # correct indices for cell neighbours based on wrap_around_borders\n #TODO: this so far only works for x,y same size..\n if self.wrap_around_borders:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n indices.append(self.board_size[0] - 1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n indices.append(0)\n else:\n for indices in [x_indices, y_indices]:\n if -1 in 
indices:\n indices.remove(-1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n\n # check each neighbour status and add to counter\n for x in x_indices:\n for y in y_indices:\n alive_neighbours = alive_neighbours + self.board_state[x][y]\n\n # dont count own value\n alive_neighbours = alive_neighbours - self.board_state[x_cell][y_cell]\n\n return alive_neighbours", "def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors", "def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count", "def countNeighbors(row, col, A):\n h = len(A)\n w = len(A[0])\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if abs(x) + abs(y) != 0:\n count += A[row+x][col+y]\n return count", "def num_black_neighbors(tile, tiles):\n return sum([tiles[add(tile, step)] for step in NEIGHBORS])", "def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]", "def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count", "def count_islands(matrix):\n visited = init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands", "def checkNumNeighbors():", "def num_cells_up(self):\n if hasattr(self, '__num_cells_up__'):\n return self.__num_cells_up__\n elif self.shared_coboundaries is not None:\n assert self.upper_index is not None\n return int(self.shared_coboundaries.max()) + 1\n assert self.upper_index is None\n return 0", "def mineNeighbor(self, cell):\n\n # Keep count of nearby mines\n counter = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n counter += 1\n\n return counter", "def mine_neighbor_count(array, game_input, row_length, col_length):\n\tlength = len(game_input)\n\toutput_num = ''\n\tfor x in 
xrange(length):\n\t\tnum_of_mines = 0\n\t\tposition = x + 1\n\t\trow_num = x / row_length # 0 0 0 1 1 1 2 2 2 3 3 3 4 4 4\n\t\tcol_num = x % row_length # 0 1 2 0 1 2 0 1 2 0 1 2 0 1 2\n\t\tif game_input[x] == \"*\":\n\t\t\toutput_num += \"*\"\n\t\t\tcontinue\n\t\tif col_num > 0:\n\t\t\t# left\n\t\t\tif array[row_num][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\tif col_num < (row_length - 1):\n\t\t\t# right\n\t\t\tif array[row_num][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\tif row_num > 0:\n\t\t\t# deals with top of the array\n\t\t\tif array[row_num - 1][0][col_num] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num > 0:\n\t\t\t\t#top left\n\t\t\t\tif array[row_num - 1][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num < row_length - 1:\n\t\t\t\t# top right\n\t\t\t\tif array[row_num - 1][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\tif row_num < col_length - 1:\n\t\t\t# deals with bottom of the array\n\t\t\tif array[row_num + 1][0][col_num] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num > 0:\n\t\t\t\t# bottom left\n\t\t\t\tif array[row_num + 1][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num < row_length - 1:\n\t\t\t\t# bottom right\n\t\t\t\tif array[row_num + 1][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\toutput_num += str(num_of_mines)\n\treturn output_num" ]
[ "0.7669013", "0.7432709", "0.7387753", "0.73015106", "0.73015106", "0.71925914", "0.717505", "0.717505", "0.717505", "0.717505", "0.717505", "0.717505", "0.7049067", "0.6970313", "0.6896128", "0.6819619", "0.6798525", "0.6792678", "0.6749823", "0.6746723", "0.67394674", "0.6705148", "0.65675443", "0.6566222", "0.6565232", "0.65650225", "0.6564528", "0.6560242", "0.6553614", "0.6549442" ]
0.75128317
1
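As a quick, self-contained illustration of the clipped-slice trick used in the `count_neighbor_mines` document above, the sketch below assumes `mines` is a 2-D boolean NumPy array; the grid contents are invented for the example.

import numpy as np

# Toy 4x5 grid with two mines; values are invented for illustration.
mines = np.zeros((4, 5), dtype=bool)
mines[0, 1] = mines[2, 2] = True

def count_neighbor_mines(mines, i, j):
    # -1 flags that the queried cell is itself a mine, as in the record above.
    if mines[i, j]:
        return -1
    # Clamp the lower bound at 0 so a negative start index cannot wrap around;
    # an end index past the edge is harmless when slicing.
    return int(np.count_nonzero(mines[(i - 1 if i > 0 else 0):i + 2,
                                      (j - 1 if j > 0 else 0):j + 2]))

print(count_neighbor_mines(mines, 1, 1))  # -> 2, both mines touch cell (1, 1)
print(count_neighbor_mines(mines, 0, 1))  # -> -1, the cell is itself a mine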
Counts the number of flags in the neighboring cells
def count_neighbor_flags(self, i, j):
    return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2,
                                       (j-1 if j > 0 else 0):j+2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_neighbor_flags(self, x, y):\n\t\treturn sum(self.marks[n][m] == FLAG for (n, m) in self.get_valid_neighbors(x, y))", "def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count", "def get_neighbours_count(self, cell: Position) -> int:\n possible_neighbours = self.get_neighbours(cell)\n return sum(self.is_alive(n) for n in possible_neighbours)", "def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count", "def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count", "def neighbor_count(A):\n sum2 = lambda A, B: map2(add, A, B)\n neighbors = ((-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1))\n return reduce(sum2,\n map(lambda d: rotate2(A, d[0], d[1]),\n neighbors))", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def flagser_contain(adjacency_matrix):\n N=adjacency_matrix.shape[0]\n row,col=convertCOO(adjacency_matrix,ret_data=False)\n return compute_cell_count(N, np.transpose(np.array( (row,col))))", "def count_num_masked_tiles(subgrid):\n\n\tnum_masked_tiles = 0\n\tfor tile in subgrid:\n\t\tif (tile == MaskedTile.MASKED) or (tile == MaskedTile.FLAG):\n\t\t\tnum_masked_tiles += 1\n\n\treturn num_masked_tiles", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def cell_detects(self, masked=False):\r\n grd = self.grd\r\n ncells = len(grd.cells['depth'])\r\n detects_i_tr = np.zeros(ncells, np.int32)\r\n if masked:\r\n not_flagged = np.where(self.rec_track.flagged==0)[0]\r\n rec_track = self.rec_track[not_flagged]\r\n else:\r\n rec_track = self.rec_track\r\n ndetects = len(rec_track)\r\n for nd in range(ndetects):\r\n tr = rec_track[nd]\r\n i = tr.i\r\n if i >= 0:\r\n detects_i_tr[i] += 1\r\n \r\n return detects_i_tr", "def main():\n row, col, island = make_matrix()\n print(count_island(row, col, 
island))", "def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors", "def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count", "def num_black_neighbors(tile, tiles):\n return sum([tiles[add(tile, step)] for step in NEIGHBORS])", "def checkNumNeighbors():", "def count(self):\n return sum([self.bits[x][y] for x in range(self.n_rows)\n for y in range(self.n_columns)])", "def get_number_neighbours_of_cell(self, x_cell, y_cell):\n alive_neighbours = 0\n \n # neighbour indices\n x_indices = [x_cell-1, x_cell, x_cell+1]\n y_indices = [y_cell-1, y_cell, y_cell+1]\n\n\n #TODO: use functional programming ^^^^^^\n #x_indices = list(filter(lambda x: x < 0 and x > self.size[0], x_indices))\n #y_indices = list(filter(lambda y: y < 0 and y > self.size[1], y_indices))\n \n # correct indices for cell neighbours based on wrap_around_borders\n #TODO: this so far only works for x,y same size..\n if self.wrap_around_borders:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n indices.append(self.board_size[0] - 1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n indices.append(0)\n else:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n\n # check each neighbour status and add to counter\n for x in x_indices:\n for y in y_indices:\n alive_neighbours = alive_neighbours + self.board_state[x][y]\n\n # dont count own value\n alive_neighbours = alive_neighbours - self.board_state[x_cell][y_cell]\n\n return alive_neighbours", "def count_islands(grid):\n grid_copy = list(grid)\n count = 0\n for i in range(0, len(grid_copy)):\n for j in range (0, len(grid_copy[0])):\n if grid[i][j] and grid_copy[i][j]:\n _dfs(grid_copy, i, j)\n count += 1\n return count", "def countNeighbors(row, col, A):\n h = len(A)\n w = len(A[0])\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if abs(x) + abs(y) != 0:\n count += A[row+x][col+y]\n return count", "def island_perimeter(grid):\n count = 0\n 
for row in grid:\n size = len(row)\n row.insert(0, 0)\n row.append(0)\n grid.insert(0, [0 for x in range(size + 2)])\n grid.append([0 for x in range(size + 2)])\n\n for e, row in enumerate(grid):\n for i, num in enumerate(row):\n if num == 1:\n if grid[e][i - 1] != 1:\n count += 1\n if grid[e][i + 1] != 1:\n count += 1\n if grid[e - 1][i] != 1:\n count += 1\n if grid[e + 1][i] != 1:\n count += 1\n return count", "def island_perimeter(grid):\n count = 0\n for j, r in enumerate(grid):\n for i, c in enumerate(r):\n if c == 1:\n if j == 0 or grid[j - 1][i] == 0:\n count += 1\n if i == 0 or grid[j][i - 1] == 0:\n count += 1\n if j == len(grid) - 1 or grid[j + 1][i] == 0:\n count += 1\n if i == len(r) - 1 or grid[j][i + 1] == 0:\n count += 1\n return count", "def count_islands(matrix):\n visited = init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands", "def count_alive_neighbors(self, status):\n kernel = np.array(\n [[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n count = convolve2d(status, kernel, mode='same', boundary=\"wrap\")\n return count", "def count_mask(mask):\n count = int(mask.sum())\n if count == 0:\n return count, None, None, None, None\n\n # argmax for mask finds the first True value\n x_min = (mask.argmax(axis=0) != 0).argmax()\n x_max = mask.shape[1] - np.flip((mask.argmax(axis=0) != 0), axis=0).argmax() - 1\n w = (mask.shape[1] - np.flip((mask.argmax(axis=0) != 0), axis=0).argmax()\n - (mask.argmax(axis=0) != 0).argmax())\n h = (mask.shape[0] - np.flip((mask.argmax(axis=1) != 0), axis=0).argmax()\n - (mask.argmax(axis=1) != 0).argmax())\n return count, w, h, x_min, x_max", "def numIslands3(self, grid: List[List[str]]) -> int:\n m = len(grid)\n if m > 0:\n n = len(grid[0])\n else:\n return 0\n\n def dfs(grid, i, j):\n if grid[i][j] != '0':\n grid[i][j] = '0'\n\n for direction in self.directions(grid, i, j):\n dfs(grid, direction[0], direction[1])\n\n island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n island += 1 # count the number of CCs\n dfs(grid, i, j)\n return island", "def nnz(self):\n t = self.get_MSC()\n return len(np.unique(t['masks']))" ]
[ "0.7226011", "0.70463705", "0.7027355", "0.6893158", "0.68669856", "0.68662775", "0.68303967", "0.6791698", "0.6785953", "0.6753818", "0.6682841", "0.66777", "0.6659068", "0.6614095", "0.66125906", "0.66072136", "0.6595425", "0.6584443", "0.65721947", "0.65716237", "0.65361273", "0.65274376", "0.6526229", "0.6511221", "0.6490599", "0.64715976", "0.64673245", "0.64657336", "0.64554983", "0.6443545" ]
0.77383655
0
Updates revealed cells by checking i, j cell and, recursevely, the contiguous cells without mines
def update_revealed(self, i, j): if not self.revealed[i, j]: # If not revealed cell if self.mines_count[i, j] < 0: # If wrong guess, games is over self.wrong = ~self.mines & self.flags self.wrong[i, j] = True self.game_over() else: # If guess is correct self.revealed[i, j] = True if self.mines_count[i, j] == 0: # Recursively looks for contiguous cells without mines for _i, _j in self.get_ij_neighbors(i, j): if self.mines_count[_i, _j] >= 0 and not self.revealed[_i, _j]: self.flags[_i, _j] = False self.update_revealed(_i, _j) elif self.mines_count[i, j] > 0: # The line below only makes sense when it's in the middle of the # recursion. For instance, a cell is flagged, but it is part of a # big blob that's going to be revealed. The game doesn't punish # the player in this scenario. This behavior has been copied # from gnome-mines self.flags[i, j] = False # Reveals mine count self.mines_count_txt[i, j].set_visible(True) elif self.mines_count[i, j] == self.count_neighbor_flags(i, j): # If cell that's already revealed is clicked and the number of # neighboring flags is the same as the number of neighboring # mines, then the hidden neighbor cells are recursevely # revealed. Evidently, if any flag guess is wrong, the game is # over. for _i, _j in self.get_ij_neighbors(i, j): if not self.flags[_i, _j] and not self.revealed[_i, _j]: self.update_revealed(_i, _j)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True", "def _update_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n if self.to_be_updated[row_number][col_number]:\n self.cells[row_number][col_number].update()", "def mark_mines(self, cells):\r\n for cell in cells:\r\n row, col = cell\r\n self.mine_field[row][col] = 'x'\r\n self.mines_left -= 1\r\n return", "def computeNeighbors(self, row, col):\n self.mines = np.zeros((self.rows, self.cols))\n for row in range(0, self.rows):\n for col in range(0, self.cols):\n if self.isMine(row, col):\n self.board[row, col] = 0\n self.addMine(row, col)", "def updateCells(cell_positions):\n # Build a set of canditates for live cells at the next generation, instead of looking through the whole grid\n # These will be dead neighbours of living cells\n possible_future_cells = set()\n # Make sets of cells to add and remove at the end of the check\n cells_remove = set()\n cells_add = set()\n for cell in cell_positions:\n # Get adjacent squares\n neighbours_dict = cellNeighbours(cell)\n number_live_neighbours = 0\n # Check which of these corresponds to another living cell\n for square in neighbours_dict.values():\n if square in cell_positions:\n number_live_neighbours+=1\n else:\n possible_future_cells.add(square)\n\n # Any live cell with fewer than two live neighbours dies, as if caused by under-population\n if number_live_neighbours<2:\n cells_remove.add(cell)\n # Any live cell with two or three live neighbours lives on to the next generation\n # do nothing\n # Any live cell with more than three live neighbours dies, as if by overcrowding\n elif number_live_neighbours>3:\n cells_remove.add(cell)\n # Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction\n for cell_candidate in possible_future_cells:\n cell_candidate_neighbours = cellNeighbours(cell_candidate).values()\n # Count number of live neighbours\n count = 0\n for square in cell_candidate_neighbours:\n if square in cell_positions:\n count+=1\n if count == 3:\n cells_add.add(cell_candidate)\n # Update cell_positions by removing dead cells and adding new-born cells\n for cell in cells_add:\n cell_positions.add(cell)\n for cell in cells_remove:\n cell_positions.remove(cell)\n # Return the update live cell list\n return cell_positions", "def find_affected_cells(i, j):\n GRID_SIZE = 9\n MINI_GRID_SIZE = 3\n cells = set()\n \n # Cells in same row\n for row_idx in range(GRID_SIZE):\n cells.add((row_idx, j))\n\n # Cells in same col\n for col_idx in range(GRID_SIZE):\n cells.add((i, col_idx))\n\n # Cells in local square\n top_left_row = MINI_GRID_SIZE * (i // MINI_GRID_SIZE)\n top_left_col = MINI_GRID_SIZE * (j // MINI_GRID_SIZE)\n\n for row_idx in range(top_left_row, top_left_row + 3):\n for col_idx in range(top_left_col, top_left_col + 3):\n cells.add((row_idx, col_idx))\n\n # Remove reference cell itself\n cells.remove((i, j))\n\n return cells", "def solve(self):\r\n while not self.done():\r\n self.no_open_cells()\r\n 
self.all_cells_are_mines()\r\n self.no_mines()\r\n if not self.done():\r\n self.obvious_cells()\r\n if not self.done():\r\n made_progress = self.safe_neighbour_difference()\r\n if made_progress:\r\n continue\r\n if not self.done():\r\n made_progress = self.adjacent_combinations()\r\n if made_progress:\r\n continue\r\n return", "def update2(self):\r\n tmp = [row.copy() for row in self.grid]\r\n changed = False\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n count = sum(self.see_occupant(x, y, i, j) for i in [-1, 0, 1] for j in [-1, 0, 1])\r\n if self.grid[y][x] == '#' and count >= 5:\r\n tmp[y][x] = 'L'\r\n changed = True\r\n elif self.grid[y][x] == 'L' and count == 0:\r\n tmp[y][x] = '#'\r\n changed = True\r\n else:\r\n tmp[y][x] = self.grid[y][x]\r\n self.grid = tmp\r\n return changed", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def Check(self):\n cleared = False\n while not cleared:\n for i in list(combinations([cell.Check() for cell in self.cells], 2)):\n # for i in list(combinations(zip(self.locations.x,self.locations.y,self.locations.length,self.locations.index),2)):\n x1 = i[0][0]\n y1 = i[0][1]\n r1 = i[0][2] / 2\n idx1 = i[0][3]\n x2 = i[1][0]\n y2 = i[1][1]\n r2 = i[1][2] / 2\n idx1 = i[0][3]\n idx2 = i[1][3]\n distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)\n radii = (r1 + r2) * (r1 + r2)\n if distance == radii:\n cleared = True\n elif distance > radii:\n cleared = True\n else:\n if x1 > x2 and y1 > y2:\n if (\n x1 + r1 > 0\n and x1 + r1 < self.boundaries[0]\n and y1 + r1 > 0\n and y1 + r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 + r1 / 2\n self.cells[idx1].y = y1 + r1 / 2\n elif x1 > x2 and y1 < y2:\n if (\n x1 + r1 > 0\n and x1 + r1 < self.boundaries[0]\n and y1 - r1 > 0\n and y1 - r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 + r1 / 2\n self.cells[idx1].y = y1 - r1 / 2\n elif x1 < x2 and y1 > y2:\n if (\n x1 - r1 > 0\n and x1 - r1 < self.boundaries[0]\n and y1 + r1 > 0\n and y1 + r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 - r1 / 2\n self.cells[idx1].y = y1 + r1 / 2\n else:\n if (\n x1 - r1 > 0\n and x1 - r1 < self.boundaries[0]\n and y1 - r1 > 0\n and y1 - r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 - r1 / 2\n self.cells[idx1].y = y1 - r1 / 2\n _logger.debug(\n f\"Bumped from {x1 :.2e}, {y1 :.2e} to {self.cells[idx1].x :.2e}, {self.cells[idx1].y :.2e}\"\n )\n cleared = False\n return", "def update_cell_membranes(cells):\r\n membrane_polys = [Polygon(cell['membrane']).buffer(0) for cell in cells]\r\n membrane_bounds = [p.bounds for p in membrane_polys]\r\n\r\n # Get normal vectors for membrane vertices\r\n vert_normals = [geometry.get_vert_normals(\r\n geometry.get_edge_normals(cell['membrane'])) for cell in cells]\r\n\r\n all_membranes = np.concatenate([cell['membrane'] for cell in cells], axis=0)\r\n # [(cell idx, vert idx), ...]\r\n all_membrane_map = np.concatenate([\r\n np.stack([\r\n 
np.repeat([i], cell['membrane'].shape[0]),\r\n np.arange(cell['membrane'].shape[0],)\r\n ], axis=1)\r\n for i, cell in enumerate(cells)\r\n ], axis=0).astype(np.int32)\r\n\r\n # Find inter-cell membrane vertices that are close enough for adhesion/diffusion\r\n nearby_membranes, nearby_membrane_map = find_nearby_membranes(\r\n all_membranes, all_membrane_map, vert_normals)\r\n\r\n # Change membrane rest length according with the cell volume\r\n membrane_rdists = []\r\n for i, cell in enumerate(cells):\r\n # Get all the pairwise distances between membrane vertices\r\n membrane_dists = scipy.spatial.distance.squareform(\r\n scipy.spatial.distance.pdist(cell['membrane']))\r\n membrane_rdists_i = 1.0 / (membrane_dists + 1e-6)\r\n membrane_rdists_i[np.where(membrane_dists == 0)] = 0\r\n membrane_rdists.append(membrane_rdists_i)\r\n\r\n return membrane_bounds, membrane_polys, vert_normals, \\\r\n all_membranes, all_membrane_map, \\\r\n nearby_membranes, nearby_membrane_map, \\\r\n membrane_rdists", "def update1(self):\r\n tmp = [row.copy() for row in self.grid]\r\n changed = False\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if self.grid[y][x] == '#' and 5 <= sum(\r\n self.is_occupied((x + i, y + j)) for i in [-1, 0, 1] for j in [-1, 0, 1]):\r\n # >= 5, because we also count (x,y)\r\n tmp[y][x] = 'L'\r\n changed = True\r\n elif self.grid[y][x] == 'L' and self.is_available(x, y):\r\n tmp[y][x] = '#'\r\n changed = True\r\n else:\r\n tmp[y][x] = self.grid[y][x]\r\n self.grid = tmp\r\n return changed", "def reduce_possibilities_by_row(self):\n x = self.targetCell.x\n for i in range(1,10): #content\n for n in range(9): #y-coord adjacent cells\n neighbour_cell = self.puzzleGrid.grid[x][n]\n if self.targetCell != neighbour_cell:\n self.targetCell.row_neighbour_possibilities.append( neighbour_cell.possibilities)\n if str(i) == neighbour_cell.finalNumber:\n self.RemovePossiblityFromTargetCell(i)\n self.targetCell.row_neighbour_possibilities = flatten_list(self.targetCell.row_neighbour_possibilities)", "def mineNeighbor(self, cell):\n\n # Keep count of nearby mines\n counter = 0\n\n # Loop over all cells within one row and column\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n if self.board[i][j]:\n counter += 1\n\n return counter", "def process_cell_cell_collision(i, membrane, vert_normals, cell, next_cell, cells, membrane_bounds, membrane_polys, next_momentum, sim_speed):\r\n for j, other in enumerate(cells):\r\n if i == j:\r\n continue\r\n\r\n b1 = membrane_bounds[i]\r\n b2 = membrane_bounds[j]\r\n # AABB test\r\n if b1[2] < b2[0] or b2[2] < b1[0] or b1[3] < b2[1] or b2[3] < b1[1]:\r\n intersection = None\r\n else:\r\n # Pressure from overlapping cells\r\n intersects = matplotlib.path.Path(\r\n other['membrane']).contains_points(membrane)\r\n next_momentum -= vert_normals[i] * 0.5 * np.expand_dims(np.minimum(\r\n (0.0 + 0.1) * intersects, 0.2 * next_cell['volume']), -1) * sim_speed\r\n\r\n # if b1[2] < b2[0] or b2[2] < b1[0] or b1[3] < b2[1] or b2[3] < b1[1]:\r\n # intersection = None\r\n # else:\r\n # intersection = membrane_polys[i].intersection(\r\n # membrane_polys[j])\r\n\r\n # # Pressure from overlapping cells\r\n # if intersection is not None and not intersection.is_empty:\r\n # intersects = matplotlib.path.Path(\r\n # other['membrane']).contains_points(membrane)\r\n # 
next_momentum -= vert_normals[i] * 0.5 * np.expand_dims(np.minimum(\r\n # (intersection.area + 0.1) * intersects, 0.2 * next_cell['volume']), -1) * sim_speed\r", "def find_next_moves(self):\n # iterate through all cells, and group them with upper cells and left\n # cells\n\n # generate separated cells then merge the them with same neighbours\n matrix_rows = len(self.status)\n if matrix_rows == 0:\n matrix_cols = 0\n else:\n matrix_cols = len(self.status[0])\n matrix = []\n for i in range(matrix_rows):\n matrix.append([[(i, j)] for j in range(matrix_cols)])\n # merge coordinations\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if self.status[i][j] != '':\n # is same with right cell?\n if j < matrix_cols - 1 and self.status[i][j] == self.status[i][j + 1]:\n new_item = matrix[i][j] + matrix[i][j + 1]\n matrix[i][j] = matrix[i][j + 1] = new_item\n # is same with down cell?\n if i < matrix_rows - 1 and self.status[i][j] == self.status[i + 1][j]:\n new_item = matrix[i][j] + matrix[i + 1][j]\n matrix[i][j] = matrix[i + 1][j] = new_item\n\n # filter out all unvalid results\n result = []\n # filter out all single-cell groups\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if (len(matrix[i][j]) > 1 and\n matrix[i][j] not in result):\n result.append(matrix[i][j])\n\n # filter sublists\n result = sorted(result, key=len, reverse=True)\n changed = True\n while changed:\n changed = False\n for i in range(len(result)):\n for j in range(i + 1, len(result)):\n if set(result[i]).issuperset(set(result[j])):\n result.remove(result[j])\n changed = True\n break\n if changed:\n break\n\n if result:\n for i in result:\n yield (self.convert_coordinations(i),\n len(i) * len(i) * 5,\n self.calc_new_status(i))\n else:\n left_cells = sum([len(i) - i.count('') for i in self.status])\n left_cells_score = 2000 - 20 * left_cells * left_cells\n if left_cells_score < 0:\n left_cells_score = 0\n for i in self.parents:\n i.children[self] = [(i.children[self][0][0] + left_cells_score,\n i.children[self][0][1],\n i.children[self][0][2])]", "def minesweeper(matrix):\n \n num_rows = len(matrix)\n num_cols = len(matrix[0])\n \n adj_mines = []\n \n adj_row = [0]*num_cols\n \n for i in range(num_rows):\n adj_mines.append(adj_row[:])\n \n for r in range(num_rows):\n for c in range(num_cols):\n if matrix[r][c] == True:\n if (r-1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r-1][c-1] += 1\n if (r-1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r-1][c] += 1\n if (r-1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r-1][c+1] += 1\n if (r) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r][c-1] += 1\n if (r) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r][c+1] += 1\n if (r+1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r+1][c-1] += 1\n if (r+1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r+1][c] += 1\n if (r+1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r+1][c+1] += 1\n\n \n return adj_mines", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n 
self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def original(arr):\n height = np.shape(arr)[0]\n width = np.shape(arr)[1]\n result = np.array(arr)\n\n for row in range(height):\n for col in range(width):\n neighbors = 0\n val = result[row][col]\n for i in range(-1, 2):\n for j in range(-1, 2):\n if i == 0 and j == 0: # The cell itself cannot be counted as a neighbor\n continue\n if row + i < 0 or col + j < 0 or row + i > height or col + j > width: # Out of bounds\n continue\n with suppress(IndexError):\n if arr[row + i][col + j] == 1:\n neighbors += 1\n\n if neighbors == 3 and val == 0: # Cell becomes alive\n result[row][col] = 1\n\n elif neighbors > 3 and val == 1 or neighbors < 2 and val == 1: # Cell dies\n result[row][col] = 0\n\n return result", "def mark_mine(self, cell):\n if cell in self.cells:\n self.mines.add(cell)\n self.cells.remove(cell)\n self.count -= 1", "def mark_mine(self, cell):\n \n if cell in self.cells:\n self.cells.discard(cell)\n self.count -= 1", "def cell(x, y):\n try:\n if cells[y][x]['filled'] == 1:\n return # this has already been processed\n except IndexError:\n return\n cells[y][x]['filled'] = 1 # this cell is now filled\n\n nn = []\n for nx, ny in neighbours(x, y):\n try:\n if cells[ny][nx]['filled']:\n nn.append(cells[ny][nx])\n except IndexError:\n continue\n \n c = 0 # colour weighting\n \n #------ Flippedness\n flipped = sum([i['inverted'] for i in nn if i['inverted']])\n cells[y][x]['inverted'] = (randint(0, 3) + flipped) % 4\n \n #------- Colour calculation\n avg_colour = sum([i['colour'][0] for i in nn]) / len(nn)\n avg_sat = sum([i['colour'][1] for i in nn]) / len(nn)\n avg_bri = sum([i['colour'][2] for i in nn]) / len(nn)\n \n # small chance of going totally random otherwise small variation from neighbours\n if random(100) > 90:\n h = randint(0, 100)\n s = randint(0, 100)\n b = randint(0, 100)\n else:\n h = (avg_colour + randint(-15, 15)) % 100\n s = (avg_sat + randint(-15, 15)) % 100\n b = (avg_bri + randint(-15, 15)) % 100\n cells[y][x]['colour'] = (h, s, b)\n \n #------- Alpha calculation\n d = sqrt((x*cell_size - rx)**2 + (y*cell_size - ry)**2) # distance from epicenter\n mx = sqrt((w-rx*cell_size)**2 + (h-ry*cell_size)**2)\n a = d/sqrt(w**2+h**2)*255\n cells[y][x]['alpha'] = a\n \n for cx,cy in neighbours(x, y):\n cell(cx, cy)", "def update_poi (POIn, POInm1, new, current_cell_mask):\n row, col = cuda.grid(2)\n\n if row < POIn.shape[0] and col < POIn.shape[1]:\n POIn[row,col] = 0 \n if current_cell_mask[row,col] == True:\n POIn[row,col] = POInm1[row,col] + new[row,col]", "def update_filled(self, filled_edges, filled_surrounded):\n surrounded_cells = []\n for cell in filled_edges:\n coord_x = cell[1]\n coord_y = cell[0]\n color = self.get_color(cell)\n surrounded = True\n\n # up\n if coord_y - 1 >= 0:\n surrounded &= self.check_if_filled((coord_y-1, coord_x), color, filled_edges, filled_surrounded)\n\n # down\n if coord_y + 1 < self.height:\n surrounded &= self.check_if_filled((coord_y+1, coord_x), color, filled_edges, filled_surrounded)\n\n # left\n if coord_x - 1 >= 0:\n surrounded &= self.check_if_filled((coord_y, coord_x-1), color, filled_edges, filled_surrounded)\n\n # right\n if coord_x + 1 < self.width:\n surrounded &= self.check_if_filled((coord_y, 
coord_x+1), color, filled_edges, filled_surrounded)\n\n if surrounded:\n surrounded_cells.append(cell)\n\n for cell in surrounded_cells:\n filled_surrounded.append(cell)\n filled_edges.remove(cell)", "def simplify_puzzle(board, done_cells):\n # Initialization\n not_done = True\n # Main loop for propagation\n while not_done:\n old_length = get_length(board)\n for i in range(n):\n for j in range(n):\n # If the value is the only possibility, propagate its effects\n # Append the coordinates to a list to keep track of what has already been done_cells\n if len(board[i][j]) == 1:# and (i,j) not in done_cells:\n done_cells.append((i,j))\n eliminate(board, i,j)\n # If the value is the only possibility within a row/column/square\n # fix that value and propagate its effects\n elif len(board[i][j]) > 1:\n check_single_value(board, done_cells, i, j)\n # Check if nothing changes or if the puzzle is solved\n new_length = get_length(board)\n if new_length == old_length:\n not_done = False\n return board", "def update_pop_matrix(self):\n for row in self.unique_rows[1:-1]: # First and last cell is water\n for col in self.unique_cols[1:-1]: # First and last cell is water\n cell = self.landscape[(row, col)]\n if cell.is_mainland:\n # print(cell)\n self.herb_pop_matrix[row - 1][col - 1] = cell.herb_count\n self.carn_pop_matrix[row - 1][col - 1] = cell.carn_count", "def update_cell_nodes(self):\n self.cells['nodes'] = -1\n\n for c in range(self.Ncells()):\n # consider two edges at a time, and find the common node\n for i,(ja,jb) in enumerate(circular_pairs(self.cell_to_edges(c))):\n for n in self.edges['nodes'][ja,:]: \n if n in self.edges['nodes'][jb]:\n self.cells['nodes'][c,i] = n\n break", "def set_adjacent_mine_count(self):\n for position in self.grid_coords:\n x, y = position\n if self.grid[y][x] >= 0:\n grid_value = sum(map(self.is_mine, get_adjacent.get_adjacent(position)))\n self.grid[y][x] = grid_value", "def changeCell(self, i, j):\n\t\t#If Cell is on Top row\n\t\tif(i==0):\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[0][1] + self.board[1][0] + self.board[1][1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[0][self.size-2] + self.board[1][self.size-2] + self.board[1][self.size-1]\n\t\t\telse:\n\t\t\t\tn = self.board[0][j-1] + self.board[1][j] + self.board[0][j+1] + self.board[1][j-1] + self.board[1][j+1]\n\t\t\t\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t#If Cell on Bottom row\n\t\telif(i==(self.size-1)):\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[self.size-1][1] + self.board[self.size-2][0] + self.board[self.size-2][1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[self.size-1][self.size-2] + self.board[self.size-2][self.size-2] + self.board[self.size-2][self.size-1]\n\t\t\telse:\n\t\t\t\tn = self.board[self.size-1][j-1] + self.board[self.size-2][j] + self.board[self.size-1][j+1] + self.board[self.size-2][j-1] + self.board[self.size-2][j+1]\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t#If Cell is in a middle row\n\t\telse:\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j+1] + self.board[i-1][j+1] + self.board[i+1][j+1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j-1] + self.board[i-1][j-1] + self.board[i+1][j-1]\n\t\t\telse:\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j-1] + self.board[i-1][j-1] + self.board[i+1][j-1] + self.board[i][j+1] + 
self.board[i-1][j+1] + self.board[i+1][j+1]\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0", "def candidate_map(self):\n candidates = [[set(range(1, 10)) for _dummy in range(9)] for _dummy in range(9)]\n vertex_value_unknown = [[True for _dummy in range(9)] for _dummy in range(9)]\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] in range(1, 10):\n candidates[line][row] = set([self.grid[line][row]])\n vertex_value_unknown[line][row] = False\n for i in range(9):\n if i != row:\n candidates[line][i].discard(self.grid[line][row])\n if i != line:\n candidates[i][row].discard(self.grid[line][row])\n if line - line%3 + i//3 != line or row - row%3 + i%3 != row:\n candidates[line - line%3 + i//3][row - row%3 + i%3].discard(self.grid[line][row])\n # Further reduce candidate map\n reduce_cadidate_map_further = True\n while reduce_cadidate_map_further:\n reduce_cadidate_map_further = False\n total_number_of_candidates = sum([len(candidates[ln][rw]) for ln in range(9) for rw in range(9)])\n for number in range(1, 10):\n for i in range(9):\n # Check for single possible vertex for *number* in candidate map line *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[i][j]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[i][seen_in_j[0]]:\n candidates[i][seen_in_j[0]] = set([number])\n vertex_value_unknown[i][seen_in_j[0]] = False\n # Discard other candidates for *number* in corresponding row and subsquare\n for j in range(9):\n if j != i:\n candidates[j][seen_in_j[0]].discard(number)\n if i - i%3 + j//3 != i:\n candidates[i - i%3 + j//3][seen_in_j[0] - seen_in_j[0]%3 + j%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same subsquare\n elif 1 < len(seen_in_j) < 4:\n subsquares = set()\n for j in seen_in_j:\n subsquares.add(3*(i//3) + j//3)\n if len(subsquares) == 1:\n subsquare = subsquares.pop()\n for j in range(9):\n if 3*(subsquare//3) + j//3 != i:\n candidates[3*(subsquare//3) + j//3][3*(subsquare%3) + j%3].discard(number)\n # Check for single possible vertex for *number* in candidate map row *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[j][i]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[seen_in_j[0]][i]:\n candidates[seen_in_j[0]][i] = set([number])\n vertex_value_unknown[seen_in_j[0]][i] = False\n # Discard other candidates for *number* in corresponding line and subsquare\n for j in range(9):\n if j != i:\n candidates[seen_in_j[0]][j].discard(number)\n if i - i%3 + j%3 != i:\n candidates[seen_in_j[0] - seen_in_j[0]%3 + j//3][i - i%3 + j%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same subsquare\n elif 1 < len(seen_in_j) < 4:\n subsquares = set()\n for j in seen_in_j:\n subsquares.add(3*(j//3) + i//3)\n if len(subsquares) == 1:\n subsquare = subsquares.pop()\n for j in range(9):\n if 3*(subsquare%3) + j%3 != i:\n candidates[3*(subsquare//3) + j//3][3*(subsquare%3) + j%3].discard(number)\n # Check for single possible vertex for *number* in candidate map subsquare *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[3*(i//3) + j//3][3*(i%3) + j%3]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3]:\n candidates[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3] = set([number])\n vertex_value_unknown[3*(i//3) + seen_in_j[0]//3][3*(i%3) + 
seen_in_j[0]%3] = False\n # Discard other candidates for *number* in corresponding line and row\n for j in range(9):\n if j not in [3*(i%3), 3*(i%3) + 1, 3*(i%3) + 2]:\n candidates[3*(i//3) + seen_in_j[0]//3][j].discard(number)\n if j not in [3*(i//3), 3*(i//3) + 1, 3*(i//3) + 2]:\n candidates[j][3*(i%3) + seen_in_j[0]%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same line/row\n elif 1 < len(seen_in_j) < 4:\n lines = set()\n rows = set()\n for j in seen_in_j:\n lines.add(3*(i//3) + j//3)\n rows.add(3*(i%3) + j%3)\n if len(lines) == 1:\n line = lines.pop()\n for row in [rw for rw in range(9) if rw not in [3*(i%3), 3*(i%3) + 1, 3*(i%3) + 2]]:\n candidates[line][row].discard(number)\n elif len(rows) == 1:\n row = rows.pop()\n for line in [ln for ln in range(9) if ln not in [3*(i//3), 3*(i//3) + 1, 3*(i//3) + 2]]:\n candidates[line][row].discard(number)\n if sum([len(candidates[ln][rw]) for ln in range(9) for rw in range(9)]) < total_number_of_candidates:\n reduce_cadidate_map_further = True\n return candidates" ]
[ "0.6543957", "0.6512995", "0.64951646", "0.6436034", "0.64001656", "0.6396528", "0.6367996", "0.6297945", "0.6249026", "0.6216629", "0.62085146", "0.616299", "0.6137553", "0.6112846", "0.60776544", "0.60543495", "0.6053125", "0.6035872", "0.6023709", "0.6005335", "0.59641814", "0.5959055", "0.5952778", "0.59508395", "0.59316987", "0.59194607", "0.59154624", "0.5900925", "0.5879951", "0.58787954" ]
0.7017881
0
model_is_cuda = next(model.parameters()).is_cuda return model.module._labels if model_is_cuda else model._labels
def get_labels(model): return model._labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_cuda(model):\n\treturn next(model.parameters()).is_cuda", "def get_all_labels(self):\n\t\tself.batch_y=Variable(torch.from_numpy(self.config.batch_y)).cuda()\n\t\treturn self.batch_y", "def get_label(image, model):\n x = Variable(image, volatile=True)\n label = model(x).data.max(1)[1].numpy()[0]\n # We have string labels for ImageNet\n if isinstance(model, torchvision.models.inception.Inception3):\n label_string = labels.get(label)\n return label_string\n return label", "def get_labels():\n return if_found(dao.get_labels())", "def labels_available(self):\n return self.training_labels.dtype.names", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_labels(info):\n return info.features[\"labels\"].names", "def model_device(model):\n # Source: https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180\n try:\n return str(next(model.parameters()).device)\n except StopIteration:\n # Model has no parameters\n pass\n return 'cpu'", "def get_labels(self):\r\n return None", "def gen_labels(loader, model):\r\n y_true, y_pred = [], []\r\n for X, y in loader:\r\n with torch.no_grad():\r\n output = model(X)\r\n predicted = predictions(output.data)\r\n y_true = np.append(y_true, y.numpy())\r\n y_pred = np.append(y_pred, predicted.numpy())\r\n return y_true, y_pred", "def get_labels(rf_pipeline):\n return rf_pipeline.stages[0].labels", "def get_class_labels(self):\r\n \r\n y = self.get_data()['y']\r\n if type(y) == torch.Tensor:\r\n return y.unique().numpy()\r\n else:\r\n return sorted(list(set(y)))", "def get_train_labels(self):\n raise NotImplementedError", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"labels\")", "def get_labels(self):\n raise NotImplementedError", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def get_labels(self):\n return self.labels", "def labels(self):\n return self._labels", "def nr_labels(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_labels\")\n else:\n return self.model_chain[-1].nr_labels", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()" ]
[ "0.6806257", "0.669914", "0.65282595", "0.6442955", "0.64141417", "0.6413409", "0.6413409", "0.6398103", "0.6295529", "0.62921506", "0.6242659", "0.62091666", "0.6193946", "0.6167976", "0.61455184", "0.61455184", "0.61455184", "0.61322767", "0.61322767", "0.61322767", "0.61322767", "0.6107985", "0.60832036", "0.6076156", "0.60702527", "0.6051118", "0.59731054", "0.59731054", "0.59731054", "0.59731054" ]
0.70128405
0
model_is_cuda = next(model.parameters()).is_cuda return model.module._labels if model_is_cuda else model._labels
def get_labels(model): return model._labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_cuda(model):\n\treturn next(model.parameters()).is_cuda", "def get_all_labels(self):\n\t\tself.batch_y=Variable(torch.from_numpy(self.config.batch_y)).cuda()\n\t\treturn self.batch_y", "def get_label(image, model):\n x = Variable(image, volatile=True)\n label = model(x).data.max(1)[1].numpy()[0]\n # We have string labels for ImageNet\n if isinstance(model, torchvision.models.inception.Inception3):\n label_string = labels.get(label)\n return label_string\n return label", "def get_labels():\n return if_found(dao.get_labels())", "def labels_available(self):\n return self.training_labels.dtype.names", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_labels(info):\n return info.features[\"labels\"].names", "def model_device(model):\n # Source: https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180\n try:\n return str(next(model.parameters()).device)\n except StopIteration:\n # Model has no parameters\n pass\n return 'cpu'", "def get_labels(self):\r\n return None", "def gen_labels(loader, model):\r\n y_true, y_pred = [], []\r\n for X, y in loader:\r\n with torch.no_grad():\r\n output = model(X)\r\n predicted = predictions(output.data)\r\n y_true = np.append(y_true, y.numpy())\r\n y_pred = np.append(y_pred, predicted.numpy())\r\n return y_true, y_pred", "def get_labels(rf_pipeline):\n return rf_pipeline.stages[0].labels", "def get_class_labels(self):\r\n \r\n y = self.get_data()['y']\r\n if type(y) == torch.Tensor:\r\n return y.unique().numpy()\r\n else:\r\n return sorted(list(set(y)))", "def get_train_labels(self):\n raise NotImplementedError", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"labels\")", "def get_labels(self):\n raise NotImplementedError", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def get_labels(self):\n return self.labels", "def labels(self):\n return self._labels", "def nr_labels(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_labels\")\n else:\n return self.model_chain[-1].nr_labels", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()" ]
[ "0.6810129", "0.6698149", "0.65257776", "0.6438587", "0.64099574", "0.6408952", "0.6408952", "0.6402114", "0.6291775", "0.6290124", "0.6238092", "0.6206064", "0.6190825", "0.61644405", "0.6142085", "0.6142085", "0.6142085", "0.6127087", "0.6127087", "0.6127087", "0.6127087", "0.610403", "0.60812074", "0.6071739", "0.6065869", "0.6046416", "0.5969426", "0.5969426", "0.5969426", "0.5969426" ]
0.7009926
1
Just a display() helper to print html code
def print_html(html): display(HTML(html))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self):\n import IPython.display\n disp = IPython.display.HTML(self.render())\n return IPython.display.display(disp, display_id=str(id(self)))", "def display(self):\n\n # This will automatically choose the best representation among repr and repr_html\n\n display(self)", "def display(self):\n\n # This will automatically choose the best representation among repr and repr_html\n\n display(self)", "def _repr_html_(self):\n return self.__repr__()", "def _repr_html_(self):\n return self.__repr__()", "def command_show(problem):\r\n print problem.get_html()", "def _repr_html_(self):\n\n return self._repr__base(rich_output=True)", "def _repr_html_(self):\n\n return self._repr__base(rich_output=True)", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def __repr__(self):\n return self.display()", "def show(self):\n import IPython\n if self._output is None:\n self.render()\n IPython.display.display(self._output, display_id=str(id(self)))", "def __html__(self):\n return str(self)", "def show(self):\n\t\tself.html += '<head>\\n' + self.head + '</head>\\n<body>\\n' + self.body + '</body>\\n</html>'\n\n\t\treturn self.html", "def __str__(self): # pragma: no cover\n return self.display()", "def summary(self):\n if _have_ipython:\n IPython.display.display(IPython.display.HTML(self._repr_html_()))\n else:\n print(self)", "def display(self) -> str:\n lines, _, _, _ = self._display_aux()\n return '\\n'.join(lines)", "def __html__(self):\n return self.html", "def _repr_html_(self):\n return self.data.to_html()", "def display(self):\n print(self)", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def the_display(self):\r\n return f\"\"\"\r\n {self.display[0]}\\n\r\n {self.display[1]}\\n\r\n {self.display[2]}\\n\r\n {self.display[3]}\\n\r\n {self.display[4]}\\n\r\n \"\"\"", "def result_display(self, arg):\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)", "def _repr_html_(self) -> str:\n output_html = self.template_base.render(context=self.context)\n return output_html", "def printContent(self):\n if self.content != \"Failed to find HTML template.\":\n print self.content", "def display_html_report():\n display(HTML('report_page.html'))", "def display(self, assignment):\r\n # Subclasses can print in a prettier way, or display with a GUI\r\n print(assignment)", "def display(self):\n print(str(self))", "def display(self,message):\r\n \r\n print(message)", "def display(self):\n disptxt = str(self)\n if self.width == 0 or self.has_output:\n print(disptxt)\n else:\n print(\"\\r\", end='')\n print(disptxt, end='')\n sys.stdout.flush()" ]
[ "0.73515886", "0.723867", "0.723867", "0.7181439", "0.7181439", "0.70992374", "0.7065982", "0.7065982", "0.7032903", "0.7032903", "0.69843686", "0.6875104", "0.6814682", "0.67885303", "0.6762089", "0.6761663", "0.67187154", "0.6695939", "0.66857904", "0.6666695", "0.66522455", "0.66471356", "0.6595335", "0.65448636", "0.65277106", "0.65126574", "0.6512021", "0.6488471", "0.6478903", "0.6473012" ]
0.7675851
0
Finds the cell in choices that is nearest from the target_cell.
def nearest_hex_cell(target_cell, choices): if choices: return _first(sorted(choices, key=lambda cell: distance_between_hex_cells(target_cell, cell)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nearest(self, query):\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in nearest_trees + distances_pool:\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best", "def determine_closest(self, targets):\n min_distance = None\n closest = None\n targets = filter(lambda x: not x.owner or x.owner is self, targets)\n for target in targets:\n # If target currently in use, skip it\n if target.occupied_by:\n print(f\"{target.name}: {target.x},{target.y} occupied by {target.occupied_by.name}\")\n continue\n\n # If target is known to be broken, skip it\n if target in self.memories.broken_items:\n continue\n\n dx = target.x - self.x\n dy = target.y - self.y\n distance = math.sqrt(dx**2 + dy**2)\n if min_distance is None or distance < min_distance:\n min_distance = distance\n closest = target\n\n return closest", "def find_nearest(numbers, target):\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]", "def best_cell(self, coord):\n if coord[0] == self.pos[0] and coord[1] == self.pos[1]:\n return self.pos\n\n # Get all available cells\n free_cells = self.get_moves()\n smal_dist = float(\"Inf\")\n\n for cell in free_cells:\n d_x = abs(coord[0] - cell[0])\n d_y = abs(coord[1] - cell[1])\n dist = (d_x**2 + d_y**2)**0.5\n if dist < smal_dist:\n smal_dist = dist\n new_cell = cell\n\n return new_cell", "def getNearestTarget(self):\n if self.myShipHull.abr in globals.targetPreference.keys():\n closestShip = self.getNearestPreference(self.myShipHull.abr)\n if closestShip != None:\n return closestShip\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1:\n range = funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange = range\n closestShip = enemyShip\n if closestShip == None and self.myGalaxy.shipsUnderAssault() == 0:\n try:\n self.myGalaxy.endSimulation(self.empireID)\n except:\n pass\n return closestShip", "def find_nearest_neighbor(src, dst):\n return sp.spatial.KDTree(dst).query(src)", "def nearest(self, value):\n coords = value[:2] # value only has 2 coords (x, y) right now, but it may have theta in the future\n hits = self.idx.nearest(self.make_bounding_box(coords), 1, objects=False)\n for hit in hits:\n # take the first index in the event of any ties\n return self.nodes[hit]\n \n \n \n #assert that value is valid here\n \"\"\"def recur(node, depth=0):\n closest, distance = node, self.cost(node.value, value)\n if depth < self.max_size:\n for child in node.children:\n (child_closest, child_distance) = recur(child, depth+1)\n if child_distance < distance:\n closest = child_closest\n distance = child_distance \n return closest, distance\n return recur(self.root)[0]\"\"\"", "def closest_other_location(state):\n locations = others_locations(state)\n target = closest_other(state)\n return locations[target]", "def choose_next_cell(self, row, col, greatest):\n\n greatestArr = []\n for nRow in range(self.dim):\n for nCol in range(self.dim):\n if self.kb[nRow][nCol] == greatest:\n m = get_manhattan_dist(row, col, nRow, nCol)\n greatestArr.append((nRow, nCol, m))\n\n leastDistArr = get_least_manhattan(greatestArr)\n if len(leastDistArr) == 0:\n return (row, col)\n elif len(leastDistArr) == 1:\n self.dist = self.dist + 
get_manhattan_dist(row, col, leastDistArr[0][0], leastDistArr[0][1])\n return leastDistArr[0]\n else:\n random_elem = randint(0, len(leastDistArr)-1)\n self.dist = self.dist + get_manhattan_dist(row, col, leastDistArr[random_elem][0],\n leastDistArr[random_elem][1])\n return leastDistArr[random_elem]", "def choose_cell_to_assign(self):\r\n min_domain = 10\r\n max_degree = -1\r\n chosen_row = None\r\n chosen_col = None\r\n for row in range(9):\r\n for col in range(9):\r\n if self.puzzle[row][col] == 0:\r\n domain_size = len(self.grid[row][col].domain)\r\n if domain_size < min_domain:\r\n min_domain = domain_size\r\n chosen_row = row\r\n chosen_col = col\r\n elif domain_size == min_domain:\r\n degree = len(self.grid[row][col].neighbors)\r\n if degree > max_degree:\r\n max_degree = degree\r\n chosen_row = row\r\n chosen_col = col\r\n return self.grid[chosen_row][chosen_col]", "def furthest_hex_cell(target_cell, choices):\n if choices:\n return _first(sorted(choices, reverse=True,\n key=lambda cell: distance_between_hex_cells(target_cell, cell)))", "def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit", "def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def findClosestTarget(self, enemyRobots):\n # self.log(\"finding closest target\")\n closest = {'target': None}\n myLoc = {'x': self.me['x'], 'y': self.me['y']}\n for bot in enemyRobots:\n enemyLoc = {'x':bot['x'], 'y': bot['y']}\n distance = self.getRangeToTarget(myLoc, enemyLoc)\n if closest['target'] is None:\n closest['target'] = bot \n closest['distance'] = distance\n closest['location'] = enemyLoc\n else:\n if distance < closest['distance']:\n closest['target'] = bot\n closest['distance'] = distance\n closest['location'] = enemyLoc\n self.log(str(closest['target']))\n self.log(str(closest['distance']))\n return closest", "def choose_bestnext(self, round):\n board_percentage = []\n \n for i in self.possible_coords:\n iSq = round.getSq(i[0], i[1])\n \n if round.pr_hook(iSq) == ' X ':\n sq_percentage = []\n surroundings = iSq.point_neighbors()\n \n for j in surroundings:\n jSq = round.getSq(j[0], j[1])\n\n if round.as_int(jSq) != None:\n count_X = 0\n count_F = 0\n check = jSq.point_neighbors()\n\n for k in check:\n kSq = round.getSq(k[0], k[1])\n if round.pr_hook(kSq) == ' X ':\n count_X += 1\n elif round.pr_hook(kSq) == ' f ':\n count_F += 1 \n if count_X != 0:\n sq_percentage.append((jSq.mine_neighbors() - count_F)/ count_X)\n\n avg_percent = 0\n if len(sq_percentage) == 0:\n avg_percent = 0.8\n elif sq_percentage.count(1) != 0:\n avg_percent = 1\n round.flagSq(i[0], i[1])\n else:\n sum_so_far = 0\n for p in sq_percentage:\n sum_so_far += p\n avg_percent = sum_so_far / len(sq_percentage)\n \n board_percentage.append(avg_percent)\n\n else:\n board_percentage.append(100)\n\n sorted_percentages = board_percentage.copy()\n sorted_percentages.sort()\n\n best_choice = board_percentage.index(sorted_percentages[0])\n\n return self.possible_coords[best_choice]", "def search_best_goal_node(self):\n\n dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]\n goal_indexes = [\n dist_to_goal_list.index(i)\n for i in dist_to_goal_list\n if i <= self.expand_dis\n ]\n\n 
safe_goal_indexes = []\n for goal_index in goal_indexes:\n t_node = self.steer(self.node_list[goal_index], self.goal_node)\n if self.check_collision(t_node, self.obstacle_list):\n safe_goal_indexes.append(goal_index)\n\n if not safe_goal_indexes:\n return None\n\n min_cost = min([self.node_list[i].cost for i in safe_goal_indexes])\n for i in safe_goal_indexes:\n if self.node_list[i].cost == min_cost:\n return i\n\n return None", "def getNearestPreference(self, myABR):\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1 and (enemyShip.myShipHull.abr in globals.targetPreference[myABR]):\n range = funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange = range\n closestShip = enemyShip\n return closestShip", "def closest_other(state):\n locations = others_locations(state)\n distances_ = distances(my_location(state), list(locations.values()))\n dist_dict = {key: dist for key, dist in zip(locations, distances_)}\n target = util.argmin_dict(dist_dict)\n return target", "def argnearest(options, targets, assume_sorted=False):\n options = np.atleast_1d(options)\n scalar = np.isscalar(targets)\n targets = np.atleast_1d(targets)\n # Sort the input array if needed\n if not assume_sorted:\n srt = np.argsort(options)\n options = options[srt]\n\n idx = np.searchsorted(options, targets, side=\"left\").clip(max=options.size-1)\n dist_lo = np.fabs(targets - options[idx-1])\n dist_hi = np.fabs(targets - options[idx])\n mask = (idx > 0) & ((idx == options.size) | (dist_lo < dist_hi))\n idx = idx - mask\n\n # Reorder the indices if the input was unsorted\n if not assume_sorted:\n idx = [srt[ii] for ii in idx]\n\n if scalar:\n idx = idx[0]\n\n return idx", "def FindClosestPoint(self, ):\n ...", "def search(self):\n best_coords = None\n for coords in self.cells():\n size = len(self[coords])\n if size == 1:\n continue\n elif size == 0:\n return False\n elif best_coords is None or size < len(self[best_coords]):\n best_coords = coords\n\n if best_coords is None:\n return self\n\n possible_values = list(self[best_coords])\n random.shuffle(possible_values)\n for val in possible_values:\n new_puzzle = self.copy()\n if new_puzzle.assign_value(best_coords, val):\n result = new_puzzle.search()\n if result:\n return result\n\n return False", "def find_centermost_cell(self, cells):\n \n closest_cell = None\n \n for current_cell in cells:\n current_dist = abs(current_cell.rect.centery - self.player.rect.centery)\n if closest_cell is None or current_dist < closest_dist:\n closest_cell = current_cell\n closest_dist = current_dist\n\n return closest_cell", "def find_closest(A, target):\n idx = A.searchsorted(target)\n idx = np.clip(idx, 1, len(A)-1)\n left = A[idx-1]\n right = A[idx]\n idx -= target - left < right - target\n return idx", "def nearest_neighbor(data_set, target):\n \n tree = KDT(data_set)\n k = tree.k\n p = KDTNode(target)\n \n def KDsearch(current, target, neighbor, distance):\n \"\"\"The actual nearest neighbor search algorithm.\n Inputs:\n current (KDTNode): the node to examine.\n target (KDTNode): the target (stored in a KDTNode).\n neighbor (KDTNode): the current nearest neighbor.\n distance (float): the current minimum distance.\n \"\"\"\n \n # Base case. 
Return the distance and the nearest neighbor.\n if current is None:\n return neighbor, distance\n index = current.axis\n d = target - current\n if d < distance:\n distance = d\n neighbor = current\n if target < current: # Recursively search 'left'\n neighbor, distance = KDsearch(\n current.left, target, neighbor, distance)\n # Back up if needed\n if target.data[index] + distance >= current.data[index]: # (?)\n neighbor, distance = KDsearch(\n current.right, target, neighbor, distance)\n else: # Recursively search 'right'\n neighbor, distance = KDsearch(\n current.right, target, neighbor, distance)\n # Back up if needed\n if target.data[index] - distance <= current.data[index]: # (?)\n neighbor, distance = KDsearch(\n current.left, target, neighbor, distance)\n \n return neighbor, distance\n \n # Search the KD-tree.\n result = KDsearch(tree.root, p, tree.root, tree.root - p)\n return result[0].data, result[1]", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices", "def exhaustive_search(data_set, target):\n\n # Initialize the outputs\n minimum_distance = float(\"inf\")\n nearest_neighbor = None\n\n # Search through the data set for the nearest neighbor\n for point in data_set:\n distance = euclidean_metric(target, point)\n if distance < minimum_distance:\n nearest_neighbor = point\n minimum_distance = distance\n return nearest_neighbor, minimum_distance", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def _find(self, candidates, target, lb, rb):\n # we'v made sure there's no duplicate in candidates\n li, ri = lb, rb\n while li < ri:\n mi = (li + ri) // 2\n if candidates[mi] < target:\n li = mi + 1\n elif candidates[mi] > target:\n ri = mi - 1\n else:\n return mi\n\n if li == ri:\n if candidates[li] <= target:\n return li\n else:\n return li - 1\n\n if ri < lb:\n return ri\n\n if li == rb:\n return rb - 1\n\n # now it's like c[ri] < target < c[li]\n # actually these 3 cases are all ri...\n return ri" ]
[ "0.6510009", "0.6492891", "0.64590746", "0.633086", "0.6319562", "0.62440336", "0.6179793", "0.6140357", "0.6127919", "0.61007917", "0.6088066", "0.6043872", "0.60174316", "0.6001085", "0.5987224", "0.59601593", "0.59468365", "0.59194386", "0.59109855", "0.58930826", "0.5885619", "0.58679116", "0.5830661", "0.5809265", "0.5804318", "0.5804318", "0.58015627", "0.5799048", "0.57709175", "0.5761234" ]
0.7604984
0
Finds the cell in choices that is furthest from the target_cell.
def furthest_hex_cell(target_cell, choices): if choices: return _first(sorted(choices, reverse=True, key=lambda cell: distance_between_hex_cells(target_cell, cell)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nearest_hex_cell(target_cell, choices):\n if choices:\n return _first(sorted(choices,\n key=lambda cell: distance_between_hex_cells(target_cell, cell)))", "def search_best_goal_node(self):\n\n dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]\n goal_indexes = [\n dist_to_goal_list.index(i)\n for i in dist_to_goal_list\n if i <= self.expand_dis\n ]\n\n safe_goal_indexes = []\n for goal_index in goal_indexes:\n t_node = self.steer(self.node_list[goal_index], self.goal_node)\n if self.check_collision(t_node, self.obstacle_list):\n safe_goal_indexes.append(goal_index)\n\n if not safe_goal_indexes:\n return None\n\n min_cost = min([self.node_list[i].cost for i in safe_goal_indexes])\n for i in safe_goal_indexes:\n if self.node_list[i].cost == min_cost:\n return i\n\n return None", "def choose_best_neighbour_simple(self):\n\t\trejected = set([]) #list of prohibited indexes which are rejected because of tabu and energy\n\t\tnIndex = -1\n\t\twhile(True):\n\t\t\tnIndex = self._find_min_diff(rejected=rejected)\t\t#index of best neighbor\n\n\t\t\tif self.is_tabu(nIndex):\n\t\t\t\toutput(message=\"\\t Neuron is in tabu. Need to check the aspiration criteria\",isDebug=True)\n\t\t\t\tif self.aspiration_criteria_satisfied(nIndex):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\trejected.add(nIndex)\n\t\t\telse:\n\t\t\t\tbreak\n\t\t# output(\"Neuron is found\",isDebug=True)\n\t\treturn nIndex", "def _choose_best_option(self):", "def best_cell(self, coord):\n if coord[0] == self.pos[0] and coord[1] == self.pos[1]:\n return self.pos\n\n # Get all available cells\n free_cells = self.get_moves()\n smal_dist = float(\"Inf\")\n\n for cell in free_cells:\n d_x = abs(coord[0] - cell[0])\n d_y = abs(coord[1] - cell[1])\n dist = (d_x**2 + d_y**2)**0.5\n if dist < smal_dist:\n smal_dist = dist\n new_cell = cell\n\n return new_cell", "def choose_next_cell(self, row, col, greatest):\n\n greatestArr = []\n for nRow in range(self.dim):\n for nCol in range(self.dim):\n if self.kb[nRow][nCol] == greatest:\n m = get_manhattan_dist(row, col, nRow, nCol)\n greatestArr.append((nRow, nCol, m))\n\n leastDistArr = get_least_manhattan(greatestArr)\n if len(leastDistArr) == 0:\n return (row, col)\n elif len(leastDistArr) == 1:\n self.dist = self.dist + get_manhattan_dist(row, col, leastDistArr[0][0], leastDistArr[0][1])\n return leastDistArr[0]\n else:\n random_elem = randint(0, len(leastDistArr)-1)\n self.dist = self.dist + get_manhattan_dist(row, col, leastDistArr[random_elem][0],\n leastDistArr[random_elem][1])\n return leastDistArr[random_elem]", "def optimal_solution_single_pickup(memo):\n # Calculates what the maximum value is and saves which row\n maxvalue = None\n for i in range(len(memo)+1):\n if maxvalue is None:\n if memo[len(memo)-1][i] is not None:\n maxvalue = (memo[len(memo)-1][i], i)\n else:\n if memo[len(memo)-1][i] is not None:\n if memo[len(memo)-1][i] > maxvalue[0]:\n maxvalue = (memo[len(memo)-1][i], i)\n\n # Goes back and calculates how the solution was formed\n optimal_solution = [0] * len(memo)\n current_row = maxvalue[1]\n # Goes backwards through the array starting at the best value\n for j in range(len(memo)-1, 0, -1):\n if current_row > 0:\n # Checks if it did pick up. 
If current cell does not have the same value as the previous column with\n # 1 less energy then it must have picked up\n if memo[j][current_row] != memo[j-1][current_row-1]:\n optimal_solution[j] = 1\n current_row += 1\n else:\n current_row -= 1\n # If at 0 energy then it must have picked up\n else:\n optimal_solution[j] = 1\n current_row += 1\n return maxvalue[0], optimal_solution", "def _find_closest_in_range(ranges: Iterable[CT], what_to_find: CT) -> Optional[CT]:\n\n ranges = sorted(ranges)\n\n while ranges:\n\n middle_item_index = len(ranges) // 2\n middle_item = ranges[middle_item_index]\n\n if what_to_find == middle_item:\n return what_to_find\n\n elif what_to_find > middle_item:\n\n if len(ranges) == 1:\n return middle_item\n\n ranges = ranges[middle_item_index:]\n\n elif what_to_find < middle_item:\n\n if ranges[middle_item_index - 1] < what_to_find:\n return ranges[middle_item_index - 1]\n\n ranges = ranges[:middle_item_index]", "def __find_best(self):\n # First look for offensive moves\n for i in range(0, 3):\n col = self.__get_col(i)\n if len(col.get('empty')) == 1:\n if col.get(self.opponent_char) == 2:\n return col.get('empty')[0]\n for i in range(0, 3):\n row = self.__get_row(i)\n if len(row.get('empty')) == 1:\n if row.get(self.opponent_char) == 2:\n return row.get('empty')[0]\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if len(diag.get('empty')) == 1:\n if diag.get(self.opponent_char) == 2:\n return diag.get('empty')[0]\n\n # Then check again looking for defensive moves\n for i in range(0, 3):\n col = self.__get_col(i)\n if len(col.get('empty')) == 1:\n if col.get(self.player_char) == 2:\n return col.get('empty')[0]\n for i in range(0, 3):\n row = self.__get_row(i)\n if len(row.get('empty')) == 1:\n if row.get(self.player_char) == 2:\n return row.get('empty')[0]\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if len(diag.get('empty')) == 1:\n if diag.get(self.player_char) == 2:\n return diag.get('empty')[0]\n\n ##### CLEAN THIS METHOD UP LATER #####\n return None", "def choose_bestnext(self, round):\n board_percentage = []\n \n for i in self.possible_coords:\n iSq = round.getSq(i[0], i[1])\n \n if round.pr_hook(iSq) == ' X ':\n sq_percentage = []\n surroundings = iSq.point_neighbors()\n \n for j in surroundings:\n jSq = round.getSq(j[0], j[1])\n\n if round.as_int(jSq) != None:\n count_X = 0\n count_F = 0\n check = jSq.point_neighbors()\n\n for k in check:\n kSq = round.getSq(k[0], k[1])\n if round.pr_hook(kSq) == ' X ':\n count_X += 1\n elif round.pr_hook(kSq) == ' f ':\n count_F += 1 \n if count_X != 0:\n sq_percentage.append((jSq.mine_neighbors() - count_F)/ count_X)\n\n avg_percent = 0\n if len(sq_percentage) == 0:\n avg_percent = 0.8\n elif sq_percentage.count(1) != 0:\n avg_percent = 1\n round.flagSq(i[0], i[1])\n else:\n sum_so_far = 0\n for p in sq_percentage:\n sum_so_far += p\n avg_percent = sum_so_far / len(sq_percentage)\n \n board_percentage.append(avg_percent)\n\n else:\n board_percentage.append(100)\n\n sorted_percentages = board_percentage.copy()\n sorted_percentages.sort()\n\n best_choice = board_percentage.index(sorted_percentages[0])\n\n return self.possible_coords[best_choice]", "def getNearestTarget(self):\n if self.myShipHull.abr in globals.targetPreference.keys():\n closestShip = self.getNearestPreference(self.myShipHull.abr)\n if closestShip != None:\n return closestShip\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1:\n range = 
funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange = range\n closestShip = enemyShip\n if closestShip == None and self.myGalaxy.shipsUnderAssault() == 0:\n try:\n self.myGalaxy.endSimulation(self.empireID)\n except:\n pass\n return closestShip", "def target_location(self):\n lst = self.cell_list()\n return lst[-1]", "def choose_target(attacker, opponent):\n\n target = target_index = None\n for index, defender in enumerate(opponent.boards):\n lneighbor, rneighbor = get_neighbors(opponent.boards, index)\n if not defender.is_attackable(attacker, lneighbor, rneighbor):\n continue\n if not target or target < defender:\n target = defender\n target_index = index\n return target, target_index", "def determine_closest(self, targets):\n min_distance = None\n closest = None\n targets = filter(lambda x: not x.owner or x.owner is self, targets)\n for target in targets:\n # If target currently in use, skip it\n if target.occupied_by:\n print(f\"{target.name}: {target.x},{target.y} occupied by {target.occupied_by.name}\")\n continue\n\n # If target is known to be broken, skip it\n if target in self.memories.broken_items:\n continue\n\n dx = target.x - self.x\n dy = target.y - self.y\n distance = math.sqrt(dx**2 + dy**2)\n if min_distance is None or distance < min_distance:\n min_distance = distance\n closest = target\n\n return closest", "def _choose_best_option(self) -> None:\r\n pawn = choice(list(self._state.game.engine.get_movable_pawns()))\r\n move = choice(self._state.game.engine.get_moves_for_pawn(pawn))\r\n self._selected_pawn = pawn\r\n self._selected_move = move", "def get_best_match(self, list):\n raise NotImplementedError", "def search(self):\n best_coords = None\n for coords in self.cells():\n size = len(self[coords])\n if size == 1:\n continue\n elif size == 0:\n return False\n elif best_coords is None or size < len(self[best_coords]):\n best_coords = coords\n\n if best_coords is None:\n return self\n\n possible_values = list(self[best_coords])\n random.shuffle(possible_values)\n for val in possible_values:\n new_puzzle = self.copy()\n if new_puzzle.assign_value(best_coords, val):\n result = new_puzzle.search()\n if result:\n return result\n\n return False", "def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]", "def _select_heuristic(self):\n\n # take a sample of rewards from the current prior of heuristics\n sample_rewards = np.random.normal(self.prior_mus, self.prior_sigmas)\n\n # select the heuristic that has the highest reward sample value\n self.best_heuristic_idx = np.argmax(sample_rewards)\n self.best_heuristic = self.heuristics[self.best_heuristic_idx]\n self.heuristic_selection.append(self.best_heuristic_idx)", "def action(self):\n\n # assume the smart opponent can always choose the best step\n # Depth First Search\n steps = 2\n stack = [(self.game_in_head, (), 0)]\n maxmin = None\n good_paths = []\n\n while len(stack) > 0:\n parent_node, path, score = stack.pop(-1)\n if len(path) >= steps*2:\n \n # leaf node in the search tree\n if maxmin is None:\n maxmin = score\n good_paths.append(path)\n elif maxmin == score:\n good_paths.append(path)\n elif maxmin < score:\n maxmin = score\n good_paths.clear()\n good_paths.append(path)\n else:\n # root node, find its 
leaves\n children_nodes = self.one_step_infe(parent_node, path, score)\n stack += children_nodes\n\n path_dec = random.choice(good_paths) \n if self.colour == 'upper':\n return path_dec[0] \n elif self.colour == 'lower':\n return path_dec[1]", "def getNextNodeUsingCellDiff(kGoalState):\n \n global fringe\n global solutions\n\n \n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getHValueForNode(pnode,kGoalState)\n #print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. function values\n g_value = getHValueForNode(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getHValueForNode(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def closest_other_location(state):\n locations = others_locations(state)\n target = closest_other(state)\n return locations[target]", "def find_best_move(state: GameState) -> None:", "def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global", "def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global", "def optimal_min(board):\n if terminal(board):\n return [None, utility(board)]\n\n available_actions = list(actions(board))\n\n # Naive baseline comparison is positive infinity\n global_optimum = [None, math.inf]\n\n for action in available_actions:\n # Anticipates optimal adversarial moves.\n local_optimum = optimal_max(result(board, action))\n\n if global_optimum[1] >= local_optimum[1]:\n global_optimum = [action, local_optimum[1]]\n\n return global_optimum", "def best_last_option(self):\n \n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get all possible blocks to make a move in\n winning_blocks = board.get_winning_blocks(affinity)\n print('total winning blocks:'+str(len(winning_blocks)))\n best_blocks = []\n best_block = None\n\n # find the largest blocks to place a stone in\n for block in winning_blocks:\n if affinity == BLUE_TILE():\n 
if len(best_blocks) == 0: best_blocks.append(block)\n elif len(block.blue) > len(best_blocks[0].blue):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.blue) == len(best_blocks[0].blue):\n best_blocks.append(block)\n elif affinity ==RED_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n if len(block.red) > len(best_blocks[0].red):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.red) == len(best_blocks[0].red):\n best_blocks.append(block)\n\n # find the best block to place a stone in\n for block in best_blocks:\n if best_block is None: best_block = block \n elif block.tiles[0][0] <= best_block.tiles[0][0]: \n if (block.tiles[0][1] != block.tiles[1][1]):\n if block.direction == 'vertical':\n if block.tiles[WINNING_ROW_SIZE()-1][1] >= best_block.tiles[WINNING_ROW_SIZE()-1][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block\n else:\n if block.tiles[0][1] >= best_block.tiles[0][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n else:\n if block.tiles[0][1] >= best_block.tiles[0][1] and block.tiles[1][0] <= best_block.tiles[1][0]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n\n # find the best move to make out of the best block \n # print('best block:'+str(best_block.tiles))\n best_move = (7,-1)\n for tile_i in range(len(best_block.tiles)):\n tile = best_block.tiles[tile_i]\n next_tile = None\n prev_tile = None \n if tile_i+1 in range(len(best_block.tiles)):\n next_tile = best_block.tiles[tile_i+1]\n if tile_i-1 in range(len(best_block.tiles)):\n prev_tile = best_block.tiles[tile_i-1]\n if board.get_tile(tile[0],tile[1]) == BLANK_TILE():\n if prev_tile is not None and next_tile is None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is None:\n if board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is not None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity or \\\n board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n \n return best_move", "def _find(self, candidates, target, lb, rb):\n # we'v made sure there's no duplicate in candidates\n li, ri = lb, rb\n while li < ri:\n mi = (li + ri) // 2\n if candidates[mi] < target:\n li = mi + 1\n elif candidates[mi] > target:\n ri = mi - 1\n else:\n return mi\n\n if li == ri:\n if candidates[li] <= target:\n return li\n else:\n return li - 1\n\n if ri < lb:\n return ri\n\n if li == rb:\n return rb - 1\n\n # now it's like c[ri] < target < c[li]\n # actually these 3 cases are all ri...\n return ri", "def findPathToClosestDot(self, gameState):\n # Here are some 
useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n explored = []\n actions = []\n initial = problem.getStartState()\n frontier = util.Queue()\n\n frontier.push((initial, actions))\n\n while not frontier.isEmpty():\n node, actions = frontier.pop()\n if node in explored:\n continue\n explored.append(node)\n if problem.isGoalState(node):\n return actions\n for successor, action, cost in problem.getSuccessors(node):\n frontier.push((successor, actions + [action]))", "def _select_heuristic(self):\n\n return None" ]
[ "0.7355767", "0.6371808", "0.6272887", "0.61813295", "0.6043925", "0.6002132", "0.5980083", "0.59294474", "0.5925985", "0.5899022", "0.5879226", "0.5807166", "0.5806372", "0.5797924", "0.5781504", "0.5767348", "0.5766683", "0.57633674", "0.57139504", "0.5713386", "0.57126397", "0.57103944", "0.57099116", "0.56804276", "0.56804276", "0.5662055", "0.5632438", "0.5627328", "0.5622726", "0.56202984" ]
0.74614465
0
Tests to see if an object starting at start with a given heading will pass through the target cell
def is_on_course_with(start, heading, target): v = Volant(*(tuple(start) + (heading,))) if start == target: return True for _ in range(distance_between_hex_cells(start, target)): v = v.advance() if v.xyh == (tuple(target) + (heading,)): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def atHead(self):\n return self.cursor == self.head", "def is_header_part(cell: str) -> bool:\n pattern = '|'.join([\n rf'(?:(?:three|3|six|6|nine|9|twelve|12)\\s+months?(?:\\s+periods?)?|quarters?|year|ytd)(?!ly)',\n rf'\\b(?:{MONTH})\\b',\n rf'^(?:end(?:ed|ing))?(?:20)\\s*[0-2]\\s*[0-9]{FOOTNOTE}$',\n rf'^\\d{1, 2}/\\d{1, 2}/\\d{2, 4}{FOOTNOTE}$',\n rf'^q[1-4](?:\\s*\\(\\w+\\))?{FOOTNOTE}$',\n rf'^[1-4]q(?:tr)?(?:\\d{2, 4})?',\n rf'as\\s+(?:reported|adjusted)',\n rf'year-?\\s*to-?\\s*date',\n rf'^year-$',\n rf'^to-date$',\n rf'full\\s+year',\n rf'^(?:28|29|30|31){FOOTNOTE}$',\n rf'^(?:month|quarter|year)s?{FOOTNOTE}$',\n rf'^(?:three|six|nine|twelve){FOOTNOTE}$',\n rf'^(?:operating|reported|baseline|percent|%|end(?:ed|ing)){FOOTNOTE}$',\n ORDINAL,\n rf'^(?:(?:20)\\s*[0-2]\\s*[0-9]\\*\\s*)?{UNAUDITED_EXACT}$'\n ])\n prepped = str(cell).lower().strip()\n match = re.search(allow_space_between_letters(pattern), prepped)\n return match is not None or parse_fiscal_period(cell) is not None", "def contains(self, mention):\n return self.start <= mention.start and mention.end <= self.end", "def check_headerRow(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = 'barcode'\n header2 = ('object identifier\\n(edit heading to specify type' +\n ' - e.g. barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n missing = []\n\n for header in expected:\n if header not in found:\n missing.append(header)\n\n if missing:\n self.raise_excelerror(\"Missing required value- {0}.\"\n .format(missing))\n\n return True", "def check_scroll_to_annotatable(self, step):\r\n assert_true(world.css_visible('.annotation-header'))", "def hdu_contains_gti(hdu):\n colnames = [c.lower() for c in hdu.data.columns.names]\n return \"start\" in colnames and \"stop\" in colnames", "def check_headerEntries(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = ('original master', 'object', 'barcode')\n header2 = ('original master', 'object',\n 'object identifier\\n(edit heading to specify type ' +\n '- e.g. 
barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n bad_entries = []\n\n for header in expected:\n if header not in found:\n bad_entries.append(header)\n\n if bad_entries:\n self.raise_excelerror(\"Incorrect header entry for {0}.\"\n .format(bad_entries))\n return True", "def valid(self, nt_header):\n try:\n return (self.OriginalFirstThunk != 0 and\n self.OriginalFirstThunk < nt_header.OptionalHeader.SizeOfImage and\n self.FirstThunk != 0 and\n self.FirstThunk < nt_header.OptionalHeader.SizeOfImage and\n self.Name < nt_header.OptionalHeader.SizeOfImage)\n except obj.InvalidOffsetError:\n return False", "def one_head_test(self, item):\n v = [i for i, j in self.A if j == item]\n return len(v) == 0", "def _pre_check(self) -> bool:\n if self._fuse_row:\n rows = (\n self._tiling.cells_in_row(self._row_idx),\n self._tiling.cells_in_row(self._row_idx + 1),\n )\n else:\n rows = (\n self._tiling.cells_in_col(self._col_idx),\n self._tiling.cells_in_col(self._col_idx + 1),\n )\n has_a_long_row = any(len(row) > 1 for row in rows)\n if has_a_long_row:\n return False\n first_cell = next(iter(rows[0]))\n second_cell = next(iter(rows[1]))\n cells_are_adjacent = (\n first_cell[0] == second_cell[0] or first_cell[1] == second_cell[1]\n )\n if not cells_are_adjacent:\n return False\n same_basis = (\n self._tiling.cell_basis()[first_cell][0]\n == self._tiling.cell_basis()[second_cell][0]\n )\n if not same_basis:\n return False\n self._first_cell = first_cell\n self._second_cell = second_cell\n return True", "def ahead(self, obs, object):\n if(obs[self.listOfObjects.index(object)][int((self.no_rays-1)/2)] > 0):\n # print(\"found \" + str(object) + \" ahead\")\n return True\n return False", "def in_lattice(self, ref):\n assert ref[0] == self.start.sym, 'The first word is not null.'\n cur_node = set([self.start])\n for word in ref[1:]:\n next_node = set()\n for i in cur_node:\n for j in i.exits:\n if word == j.dest.sym:\n next_node.add(j.dest)\n if not next_node:\n return False\n else:\n cur_node = next_node\n if sum([i == self.end for i in cur_node]) == 0:\n return False\n return True", "def findWithinHorizon(self) -> str:\n raise NotImplementedError", "def check_rows(self, win: list) -> bool:\r\n for row in self.tags:\r\n for j in range(len(row) - len(win) + 1):\r\n if win == row[j:j+self.win_condition]:\r\n return True", "def cell_is_laser_movable(self, y, x, heading, map_data):\n return map_data[y][x] in self.MOVABLE_SYMBOLS[heading]", "def is_horizontal(self):\n return self.start.x == self.end.x", "def depth_heading_isterm(self, goal):\n depth_ok = abs(goal.target_depth - self.curr_depth) < self.depth_tol\n heading_ok = abs(goal.target_heading - self.curr_heading)\\\n < self.heading_tol\n return depth_ok and heading_ok", "def check_start (wordsearch, word, start_pos):\n directions = [[-1,1], [0,1], [1,1], [-1,0], [1,0], [-1,-1], [0,-1], [1,-1]]\n # Iterate through all directions and check each for the word\n for d in directions:\n if (check_dir(wordsearch, word, start_pos, d)):\n return True", "def _in_huc(shp, hucstr, source):\n logging.debug(\"Checking: shp in '%s'?\"%hucstr)\n\n try:\n fname = source.download(hucstr)\n profile, huc = source.load_huc(hucstr)\n except RuntimeError as err:\n logging.debug(\"No such HUC %s found? 
%s\"%(hucstr,str(err)))\n raise err\n\n if profile['crs']['init'] != 'epsg:4269':\n # latlong\n raise RuntimeError(\"HUC file for '%s' not in Lat-Lon?\"%hucstr)\n\n huc_shp = shapely.geometry.shape(huc['geometry'])\n logging.debug(\" shp bounds = %r\"%list(shp.bounds))\n logging.debug(\" huc bounds = %r\"%list(huc_shp.bounds))\n if huc_shp.contains(shp):\n logging.debug(' yes!')\n return 2\n elif huc_shp.intersects(shp):\n logging.debug(' sorta!')\n return 1\n else:\n logging.debug(' no!')\n return 0", "def left(self, obs, object):\n for i in range(int((self.no_rays-1)/2)):\n if(obs[self.listOfObjects.index(object)][i] > 0):\n # print(\"found \" + str(object) + \" left\")\n return True\n return False", "def is_break_point(h_list, i):\n item = h_list[i]\n # a) at glue, provided that this glue is immediately preceded by a non-\n # discardable item, and that it is not part of a math formula (i.e.,\n # not between math-on and math-off).\n # A break 'at glue' occurs at the left edge of the glue space.\n # TODO: Add math conditions.\n if (isinstance(item, Glue)\n # Check a previous item exists, and it is not discardable.\n and ((i - 1) >= 0) and (not h_list[i - 1].discardable)):\n return True\n # b) at a kern, provided that this kern is immediately followed by\n # glue, and that it is not part of a math formula.\n # TODO: Add math conditions.\n elif (isinstance(item, Kern)\n # Check a following item exists, and it is glue.\n and ((i + 1) <= (len(h_list) - 1))\n and isinstance(h_list[i + 1], Glue)):\n return True\n # c) at a math-off that is immediately followed by glue.\n elif (isinstance(item, MathOff)\n # Check a following item exists, and it is glue.\n and ((i + 1) <= (len(h_list) - 1))\n and isinstance(h_list[i + 1], Glue)):\n return True\n # d) at a penalty (which might have been inserted automatically in a\n # formula).\n elif isinstance(item, Penalty):\n return True\n # e) at a discretionary break.\n elif isinstance(item, DiscretionaryBreak):\n return True\n else:\n return False", "def ate_itself(self):\r\n ate_flag = False\r\n\r\n for i in self.body:\r\n if self.head[0] + self.direction[0]*10 == i[0] and self.head[1] + self.direction[1]*10 == i[1]:\r\n ate_flag = True\r\n\r\n return ate_flag", "def continues_above(self):\n if self.row_num == 0:\n return False\n return (self.master_grid.matrix[self.row_num-1][self.col_num] \n == self.character)", "def interior_contains(self, Vobj):\n return False", "def interior_contains(self, Vobj):\n return False", "def _row_or_col_is_header(s_count, v_count):\n if s_count == 1 and v_count == 1:\n return False\n else:\n return (s_count + 1) / (v_count + s_count + 1) >= 2. / 3.", "def is_match(cells):\n if len(cells) == 1 and \"-\" not in cells:\n return list(cells)[0]\n return None", "def in_range(table, index):\n if index > len(table):\n print(\"Error: index out of range\")\n return False\n if index < 0:\n print(\"Error: negative index\")\n return False\n return True", "def __check_headers_contains_elem(headers, element, nested_elem=None):\n\n if element in headers:\n if nested_elem is not None and nested_elem not in headers[element]:\n return __get_eval_result(\n \"FAILED\", f\"No {element} header with {nested_elem} found\", 90\n )\n\n return __get_eval_result(\n \"PASSED\", f\"Detected Header with the value of: {headers[element]}\", 90\n )\n\n else:\n return __get_eval_result(\"FAILED\", f\"No {element} header found\", 90)", "def present_in_slice(self, start, stop):\n return self.starts_before(start) and self.ends_after(stop - 1)" ]
[ "0.5828779", "0.5647116", "0.55674917", "0.5562699", "0.5555092", "0.5537959", "0.54243815", "0.5332083", "0.5316014", "0.5296079", "0.5247089", "0.52382934", "0.52101386", "0.5176555", "0.51518446", "0.5150946", "0.5148843", "0.51462966", "0.51339036", "0.5132057", "0.51300246", "0.5125683", "0.51090974", "0.5104972", "0.5104972", "0.5094656", "0.5092129", "0.5085922", "0.5084047", "0.5061815" ]
0.70463324
0
Converts the numpy image saved to 'fn' into a .png. The resulting image is saved with the same filename, except with the .png extension (input image must have extension .npy).
def convert(fn): assert fn[-4:] == ".npy", "%s: File extension should match '.npy'" % fn print "Converting file '%s' to .png" % fn numpy_img = np.load(fn) #Rescale to 0-255 and convert to uint8 rescaled = (255.0 / numpy_img.max() * (numpy_img - numpy_img.min())).astype(np.uint8) im = Image.fromarray(rescaled) im.save(fn[:-4] + ".png")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, fn):\n plt.imsave(fn, self.image)", "def save_to_png(arr,\n path=None,\n image_mode=None,\n show=True,\n labels=None,\n scale=None):\n if image_mode is None:\n image_mode = _get_image_type_from_array(arr)\n\n img = Image.fromarray(arr, mode=image_mode)\n\n if labels is not None:\n img = add_header(img, labels)\n\n if scale is None:\n scale = max(1, int(500 / max(arr.shape)))\n\n if scale != 1:\n img = img.resize((img.size[0] * scale, img.size[1] * scale))\n\n # Saving to a temporary file is needed even when showing in a notebook\n if path is None:\n path = '/tmp/tmp.png'\n elif not path.endswith('.png'):\n # Only PNG is supported because JPEG files are unnecessarily 3 times larger.\n path = '{}.png'.format(path)\n with gfile.Open(path, 'wb') as fout:\n img.save(fout, format=path.split('.')[-1])\n\n # Show image (great for notebooks)\n if show:\n display.display(display.Image(path))", "def new_func():\n dirname, _ = os.path.split(os.path.abspath(__file__))\n pngfile = os.path.sep.join([dirname, \"out.png\"])\n img = [\"110010010011\", \"101011010100\", \"110010110101\", \"100010010011\"]\n img = [[int(val) for val in value] for value in img]\n writer = png.Writer(len(img[0]), len(img), greyscale=True, bitdepth=16)\n with open(pngfile, \"wb\") as file:\n writer.write(file, img)\n try:\n func(pngfile)\n finally:\n os.remove(pngfile)", "def saveImage(np_img, output_path, mode=None, scale=255.0):\n\n if mode is not None:\n if mode == 'dist_norm':\n np_img *= scale/np_img.max()\n np_out = Image.fromarray((np_img).astype('uint8'))\n elif mode == 'blob_labels':\n np_out = Image.fromarray((np_img*255).astype(np.uint8))\n else:\n return (mode, \" is not a supported mode.\")\n else:\n np_out = Image.fromarray(np_img)\n np_out.save(output_path)\n\n if os.path.exists(output_path):\n return 0\n else:\n return 1", "def save_numpy_array_to_png(array, mode='RGB', size=None): \n\n im = Image.fromarray(array, mode=mode)\n\n if size:\n im = im.resize(size)\n\n im.save('numpy_img.png', format=\"PNG\")", "def save_npimg(array: np.ndarray, path: str) -> None:\r\n img = Image.fromarray(array.squeeze())\r\n img.save(path)", "def _save_annotation(annotation, filename):\n\n pil_image = Image.fromarray(annotation.astype(dtype=np.uint8))\n '''\n with tf.io.gfile.GFile(filename, mode='w') as f:\n #with open(filename, mode='w') as f:\n print(f)\n pil_image.save(f, 'PNG')\n '''\n pil_image.save(filename)", "def get_png_image(filename):\n im_arr = io.imread(filename)\n return im_arr", "def export_PNG(filename, I):\n\n type = 'L'\n if depth(I) == 3:\n type = 'RGB'\n if depth(I) == 4:\n type = 'RGBA'\n if istorch(I):\n Image.fromarray(torch2numpy(I * 255).astype(numpy.uint8), type).save(filename)\n if isnumpy(I):\n Image.fromarray((I * 255).astype(numpy.uint8), type).save(filename)\n return", "def save_numpy_array_as_image(narray, path, mode=\"uint8\", image_mode=\"L\"):\n img = Image.fromarray(np.asarray(np.clip(narray, 0, 255), dtype=mode), image_mode)\n img.save(path)", "def fits_to_png(fn_in, fn_out=None, vmin=None, vmax=None, scaling='arcsinh'):\n\n\t# setting fn_out\n\textension = '.png'\n\n\tif fn_out is None:\n\t\tbase_in, ext_in = os.path.splitext(fn_in)\n\n\t\tif ext_in == '.fits': \n\t\t\tfn_out = base_in+extension\n\t\telse: \n\t\t\tfn_out = fn_in+extension\n\n\tif not os.path.isfile(fn_in):\n\t\tprint(\"skipping \"+fn_in+\" as in file does not exist\")\n\telse:\n\t\t# read in\n\t\timg = fits.getdata(fn_in)\n\n\t\timg_scaled = scale_img(img, vmin=vmin, vmax=vmax, 
scaling=scaling)\n\n\t\tsi.imsave(fn_out, img_scaled)", "def save_array_as_image(arr, filename):\n arr = arr.copy().clip(0, 255).astype('uint8')\n im = Image.fromarray(arr)\n im.save(filename)", "def save_tensor_image(fn, x):\n\n if ( 3 == len( x.size() ) ):\n x = x.permute((1, 2, 0))\n \n # Get the CPU NumPy version.\n x = torch.clamp(x, 0, 255)\n x = x.cpu().numpy().astype(np.uint8)\n\n # Save the iamge.\n cv2.imwrite(fn, x, [cv2.IMWRITE_PNG_COMPRESSION, 0])", "def array_to_file(filename, a):\n a = normalize_array(a)\n i = Image.fromarray(a.astype('uint8'))\n return i.save(filename)", "def save_img(img, file_to_save):\n nparray_rep = img\n if not isinstance(img, np.ndarray):\n nparray_rep = k_image.img_to_array(img)\n k_image.save_img(file_to_save, nparray_rep)", "def save_img(img: np.ndarray, path: str) -> None:\n\n img_obj = Image.fromarray(img)\n img_obj.save(path)", "def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)", "def array_to_imagefile(data, imagefname,verbose=False):\n if data.ndim == 2:\n data = np.dstack([data,data,data])\n data = np.rollaxis(data,-1)\n # print(data.shape)\n img = Image.fromarray(np.uint8(np.rollaxis(np.rollaxis(data,-1),-1)))\n if data.ndim == 2:\n if data.shape[3] == 3:\n img = img.convert(mode=\"RGB\")\n img.mode='RGB'\n if data.shape[3] == 4:\n img = img.convert(mode=\"RGBA\")\n img.mode='RGBA'\n \n \n if verbose:\n print(\"saving \", os.path.realpath(imagefname))\n img.save(imagefname)\n return 1", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def _save_frame_as_png(\n self : \"animation\",\n frame : \"np.ndarray\",\n filename : \"str\"\n ):\n im = Image.fromarray(frame)\n im.save(filename)", "def save_rgba_tiff(array: np.ndarray, filename: str, mode: str):\n if mode == \"RGBA\":\n array = np.concatenate([array, 255 * np.ones_like(array[0])[np.newaxis]]).astype(np.uint8)\n\n img_rgb = array.transpose(1, 2, 0)\n imwrite(filename, img_rgb, shape=img_rgb.shape, tile=(16, 16))\n\n return filename", "def __make_png(self, abspath_img_rgb):\n if not os.path.exists(DIR_PNG):\n os.makedirs(DIR_PNG)\n\n outsize = '{}%'.format(OUTSIZE_RGB)\n img_name_rgb = os.path.basename(abspath_img_rgb)\n suffix_extension_tif = Utils.get_suffix_tif(img_name_rgb)\n img_png = img_name_rgb.replace(suffix_extension_tif, '.png')\n path_img_png = os.path.join(DIR_PNG, img_png)\n\n command = \"gdal_translate -ot byte -of PNG -outsize {} {} \" \\\n \"-a_nodata 0 -q {} {}\".format(\n outsize, outsize, abspath_img_rgb, path_img_png\n )\n os.system(command)\n return os.path.join(DIR_PNG_TO_DB, img_png)", "def save_image(imgs, filename, grayscale=False, nrow=8, padding=2):\n from PIL import Image\n if imgs.dtype != np.uint8:\n if grayscale:\n imgs=imgs*255\n else:\n imgs = (imgs * 0.5 + 0.5) * 255\n imgs = imgs.astype('uint8')\n grid = make_grid(imgs, nrow=nrow, padding=padding)\n im = Image.fromarray(grid)\n im.save(filename)", "def saveImage(self, 
fileName=\"mandelbrot.frac\"):\n # Save the image as a PNG\n if fileName == \"\":\n fileName = \"mandelbrot.frac\"\n directories = fileName.split(\"/\")\n for n in directories:\n if \".frac\" in n:\n name = n.rsplit(\".\", 1)[0]\n self.img.write(f\"{name}.png\")\n print(f\"Wrote image {name}.png\")", "def imwrite(img, file_path, params=None, auto_mkdir=True):\n if not isinstance(img, np.ndarray):\n raise TypeError('\"img\" must be a numpy array!')\n if auto_mkdir:\n cvtools.makedirs(file_path)\n # return cv.imwrite(file_path, img, params)\n # support path included chinese\n return cv.imencode(osp.splitext(file_path)[-1], img, params)[1].tofile(file_path)", "def save_image(image, file_name):\n io.imsave(file_name,image)", "def image_save(path, na: numpy.ndarray):\n # change type\n na = numpy.fmax(numpy.fmin(na * 255.0, 255), 0).astype(\"uint8\")\n # shape is now (1,3,h,w), remove 1\n na = na.reshape(3,na.shape[2],na.shape[3])\n # fix shape\n na = numpy.moveaxis(na, [0,1,2], [2,0,1])\n # shape is now (h,w,3)\n # file\n Image.fromarray(na).save(path)", "def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)", "def save(self, filename):\n try:\n import PIL\n except ImportError:\n raise RuntimeError('Could not import PIL. PIL (pillow) is required to save fresnel images.')\n else:\n if self._output is None:\n self.render()\n image = PIL.Image.fromarray(self._output[:], mode='RGBA')\n image.save(filename)" ]
[ "0.68175745", "0.6526428", "0.65264", "0.64868677", "0.64535123", "0.63635564", "0.6300293", "0.6246505", "0.6224143", "0.62207234", "0.6219505", "0.62152684", "0.62044156", "0.6155904", "0.6146134", "0.60844654", "0.6057763", "0.6055479", "0.60439825", "0.60439825", "0.6043075", "0.60298043", "0.6015856", "0.6014368", "0.60058147", "0.59923726", "0.59792614", "0.5979187", "0.5969925", "0.5925736" ]
0.7737924
0
Return the name of the Controller.
def name(self) -> str: return "Controller"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n return f\"{NAME} {RES_CONTROLLER} {self._controller.controller_index + 1} {RES_MASTER}\"", "def network_fabric_controller_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_fabric_controller_name\")", "def show_controller(cls, args, config):\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns controller show name\")\n return {'msg': str(config.get_object(name=args[0], kind='Controller'))}", "def getController(self):\n return self.__controller", "def name(self):\n return f\"{NAME} {RES_CONTROLLER} {self._zone.controller_index + 1} {RES_ZONE} {self._zone.zone_index + 1}\"", "def get_name(cls):\n return cls.__name__", "def getname(self):\n return self.__class__.__name__", "def controller( self ):\n\t\ttry:\n\t\t\treturn self._controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"controller: %s\" % str(e) )", "def module_name(self) -> Union[str, None]:\n if not self.view_func:\n return None\n elif self._controller_cls:\n return inspect.getmodule(self._controller_cls).__name__\n return inspect.getmodule(self.view_func).__name__", "def name(self):\n return self.__class__.__name__", "def name(self):\n return self.__class__.__name__", "def name(self):\n return self.__class__.__name__", "def name(self):\n return self.__class__.__name__", "def name(self):\n return self.__class__.__name__", "def name(cls) -> str:\n\n return cls.__name__", "def get_name(cls):\n return cls.name or cls.__name__.lower()", "def full_name(self) -> Union[str, None]:\n if not self.view_func:\n return None\n\n prefix = self.view_func.__module__\n if self._controller_cls:\n module_name = self._controller_cls.__module__\n class_name = self._controller_cls.__name__\n prefix = f'{module_name}.{class_name}'\n return f'{prefix}.{self.method_name}'", "def name(self) -> str:\n return self.__class__.__name__", "def name(self) -> str:\n return self.__class__.__name__", "def name(self):\n return self.__name__", "def name(self):\n\n return self.__class__.__name__", "def _get_controller(self):\n return self.__controller", "def getName(self):\n return self.__name__", "def name(cls) -> str:\n return cls.__name__", "def get_name(self):\n return \"{0}: \".format(self.__class__.__name__)", "def getName(self):\r\n return self.__name__", "def name(self) -> str:\n return self.__class__.__name__ # pragma: no cover", "def name(self):\n return self.device.name()", "def get_name(self) -> str:\n return self.__name", "def get_current_controller():\n controllers = parse_yaml_file(JUJU_CONTROLLERS_YAML)\n return controllers.get(\"current-controller\", \"\")" ]
[ "0.74996763", "0.70226", "0.6873626", "0.6845823", "0.678562", "0.6770048", "0.6729016", "0.6712965", "0.6664675", "0.66610116", "0.66610116", "0.66610116", "0.66610116", "0.66610116", "0.6641972", "0.6631533", "0.6608326", "0.6596609", "0.6596609", "0.65897757", "0.6588887", "0.65672505", "0.65653217", "0.6564307", "0.65625566", "0.65243423", "0.65216976", "0.6498384", "0.6486951", "0.64825934" ]
0.870959
0
Set the preset mode; if None, then revert to 'Auto' mode.
def set_preset_mode(self, preset_mode: str | None) -> None: self.svc_set_system_mode(PRESET_TO_TCS.get(preset_mode, SystemMode.AUTO))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_preset_mode(self, preset_mode: str) -> None:\n if self.target_temperature == 0:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_HOME,\n )\n\n if (\n preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]\n and self._module_type == NA_VALVE\n ):\n self._data.homestatus.setroomThermpoint(\n self._data.home_id,\n self._room_id,\n STATE_NETATMO_MANUAL,\n DEFAULT_MAX_TEMP,\n )\n elif preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, PRESET_MAP_NETATMO[preset_mode]\n )\n elif preset_mode in [PRESET_SCHEDULE, PRESET_FROST_GUARD, PRESET_AWAY]:\n self._data.homestatus.setThermmode(\n self._data.home_id, PRESET_MAP_NETATMO[preset_mode]\n )\n else:\n _LOGGER.error(\"Preset mode '%s' not available\", preset_mode)\n\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "async def async_set_preset_mode(self, preset_mode: str) -> None:\n if preset_mode == PRESET_MODE_WHOOSH:\n # Sleep mode must be off for Whoosh to work.\n if self._device.sleep_mode:\n self._device.sleep_mode = False\n self._device.fan_whoosh_mode = True\n return\n raise ValueError(f\"Invalid preset mode: {preset_mode}\")", "def set_preset_mode(self, preset_mode: str | None) -> None:\n self.svc_set_zone_mode(\n mode=PRESET_TO_ZONE.get(preset_mode),\n setpoint=self.target_temperature if preset_mode == \"permanent\" else None,\n )", "async def async_set_preset_mode(self, preset_mode: str) -> None:\n if self._on != \"1\":\n if preset_mode == PRESET_NONE:\n return\n await self.async_turn_on()\n\n _LOGGER.debug(\"Setting preset mode of %s to %s\", self._unique_id, preset_mode)\n\n if preset_mode == PRESET_ECO:\n await self._device.command(\"energysave_on\")\n self._previous_state = preset_mode\n elif preset_mode == PRESET_BOOST:\n await self._device.command(\"turbo_on\")\n self._previous_state = preset_mode\n elif preset_mode == PRESET_SLEEP:\n await self._device.command(\"sleep_1\")\n self._previous_state = self._attr_hvac_mode\n elif preset_mode == \"sleep_2\":\n await self._device.command(\"sleep_2\")\n self._previous_state = self._attr_hvac_mode\n elif preset_mode == \"sleep_3\":\n await self._device.command(\"sleep_3\")\n self._previous_state = self._attr_hvac_mode\n elif preset_mode == \"sleep_4\":\n await self._device.command(\"sleep_4\")\n self._previous_state = self._attr_hvac_mode\n elif self._previous_state is not None:\n if self._previous_state == PRESET_ECO:\n await self._device.command(\"energysave_off\")\n elif self._previous_state == PRESET_BOOST:\n await self._device.command(\"turbo_off\")\n elif self._previous_state in HA_STATE_TO_AC:\n await self._device.command(HA_STATE_TO_AC[self._previous_state])\n self._previous_state = None", "async def async_set_preset_mode(self, preset_mode):\n if preset_mode == PRESET_AWAY and not self._is_away:\n self._is_away = True\n self._saved_target_temp = self._target_temp\n self._target_temp = self._away_temp\n elif preset_mode == PRESET_NONE and self._is_away:\n self._is_away = False\n self._target_temp = self._saved_target_temp\n await self.send_ir()\n self.async_write_ha_state()", "def set_preset_mode(self, preset_mode: str) -> None:\n if preset_mode == PRESET_AWAY:\n success = self._client.set_away(self._client.AWAY_AWAY)\n elif preset_mode == HOLD_MODE_TEMPERATURE:\n success = self._client.set_away(self._client.AWAY_HOME)\n success = success and self._client.set_schedule(0)\n elif preset_mode == PRESET_NONE:\n success = 
self._client.set_away(self._client.AWAY_HOME)\n success = success and self._client.set_schedule(1)\n else:\n _LOGGER.error(\"Unknown hold mode: %s\", preset_mode)\n success = False\n\n if not success:\n _LOGGER.error(\"Failed to change the schedule/hold state\")\n self.schedule_update_ha_state()", "async def async_set_preset_mode(self, preset_mode: str) -> None:\n if self._feature_preset_mode and self.preset_modes:\n if preset_mode not in self.preset_modes and preset_mode is not PRESET_NONE:\n _LOGGER.warning(\"'%s' is not a valid preset mode\", preset_mode)\n return\n mqtt_payload = self._command_templates[CONF_PRESET_MODE_COMMAND_TEMPLATE](\n preset_mode\n )\n await self._publish(\n CONF_PRESET_MODE_COMMAND_TOPIC,\n mqtt_payload,\n )\n\n if self._optimistic_preset_mode:\n self._attr_preset_mode = preset_mode\n self.async_write_ha_state()\n\n return", "def set_preset_mode(self, preset_mode):\n dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode]\n self._device.set_property(PROPERTY_TO_DPS_ID[ATTR_PRESET_MODE], dps_mode)", "def set_preset_mode(self, preset_mode):\n\n if preset_mode == PRESET_HOME:\n \"\"\"Turn away mode off.\"\"\"\n self._away = False\n self._device.set_temperature_to_auto()\n\n elif preset_mode == PRESET_AWAY:\n \"\"\"Turn away mode on.\"\"\"\n self._away = True\n self._device.set_location_to_frost()\n\n else:\n raise InvalidStateError\n\n pass", "async def async_set_preset_mode(self, preset_mode: str) -> None:\n if preset_mode not in self.preset_modes:\n LOG.warning(\n f\"LUNOS preset '{preset_mode}' is not valid: {self.preset_modes}\"\n )\n return\n\n if preset_mode in self._fan_speeds:\n await self.async_set_speed(preset_mode)\n\n elif preset_mode in self._vent_modes:\n await self.async_set_ventilation_mode(preset_mode)\n\n elif preset_mode == PRESET_SUMMER_VENT:\n await self.async_turn_on_summer_ventilation()\n\n else:\n LOG.warning(\n f\"LUNOS preset '{preset_mode}' not supported: {self.preset_modes}\"\n )", "def preset_mode(self):\n return PRESET_AWAY if self._is_away else PRESET_NONE", "def preset_mode(self):\n dps_mode = self._device.get_property(PROPERTY_TO_DPS_ID[ATTR_PRESET_MODE])\n if dps_mode is not None:\n return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode)\n else:\n return None", "def preset_mode(self) -> Optional[str]:\n return self._preset", "def preset_mode(self):\n return self._preset_mode", "def preset_mode(self) -> Optional[str]:\n if self._device.fan_whoosh_mode:\n return PRESET_MODE_WHOOSH\n return None", "def preset_mode(self):\n if self._away:\n return PRESET_AWAY\n return PRESET_HOME", "def preset_mode(self) -> str | None:\n state = self._state\n return state.custom_preset or _PRESETS.from_esphome(\n state.preset_compat(self._api_version)\n )", "def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]", "def setMode(cls, mode):\n global CURRENT_MODE\n assert isinstance(mode, cls), \"Invalid mode {}\".format(mode)\n CURRENT_MODE = mode", "def preset_mode(self) -> str:\n # NOTE: fan speeds are not really presets...the only presets LUNOS has is vent mode\n return self._vent_mode", "def _select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()", "async def async_set_preset_mode(self, preset_mode: str) -> None:\n if preset_mode not in PRESET_MODE_TO_DECONZ:\n raise ValueError(f\"Unsupported preset mode {preset_mode}\")\n\n await 
self.gateway.api.sensors.thermostat.set_config(\n id=self._device.resource_id,\n preset=PRESET_MODE_TO_DECONZ[preset_mode],\n )", "def mode_auto(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Bot Auto Mode Set\")\n self.patrol()", "async def async_set_preset_mode(self, preset_mode: str) -> None:\n update_needed = await self._async_set_preset_mode_internal(preset_mode)\n\n if update_needed:\n # This state change affects other entities like sensors. Force an immediate update that\n # can be observed by all parties involved.\n await self.coordinator.async_request_refresh()", "def set_automatic(self, mode):\n self.slam.controlled = not mode\n if mode:\n self.slam.resume()", "def mode(self, mode):\n self.set_mode(mode)", "def set_preset(self, preset_id):\n\n preset_id = self.clamp(preset_id, 0x00, 0x7f)\n command = [0x00, 0x03, 0x00, preset_id]\n self.send_command(command)", "def set_mode(self, mode):\n print('set_mode', mode)\n self._mode = int(mode)", "def preset_mode(self) -> str | None:\n\n if self._device.tcs.system_mode is None:\n return # unable to determine\n # if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in MODE_TCS_TO_HA:\n if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in (\n SystemMode.AWAY,\n SystemMode.HEAT_OFF,\n ):\n return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]]\n\n if self._device.mode is None:\n return # unable to determine\n if self._device.mode[CONF_MODE] == ZoneMode.SCHEDULE:\n return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]]\n return PRESET_ZONE_TO_HA.get(self._device.mode[CONF_MODE])", "def preset(self):\n self._clear_read_buffer()\n self._write_cmd(\"PP\")" ]
[ "0.7440283", "0.7355509", "0.73498267", "0.7344056", "0.72323555", "0.7204877", "0.7160711", "0.71337557", "0.70753354", "0.70729864", "0.70568067", "0.70467067", "0.70342845", "0.6982908", "0.68103224", "0.6772423", "0.67633486", "0.6668619", "0.66624355", "0.665485", "0.6639274", "0.6627967", "0.6618422", "0.66053295", "0.65533465", "0.64364845", "0.6429783", "0.637301", "0.63545394", "0.6326196" ]
0.79922223
0
Reset the (native) operating mode of the Controller.
def svc_reset_system_mode(self) -> None: self._call_client_api(self._device.reset_mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svc_reset_zone_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)", "def resetDeviceStates(self):", "def softreset(self):\n try:\n self.device.write(b'\\x03') # abort\n self.device.write(b'\\x04') # reset\n self.device.write(b'\\r')\n self.__read_until(b'raw REPL; CTRL-B to exit\\r\\n>')\n except Exception as e:\n raise ReplError(e)", "def reset(self):\n # The camera will give no response to this command\n self._serial_io('\\x55\\x99\\x66\\x11', None)\n while True:\n try:\n self.system_state = 0x11\n if self.system_state == 0x11:\n break\n except CygnetExc:\n time.sleep(.2)\n while True:\n self.system_state = 0x12\n time.sleep(.2)\n if self.system_state == 0x16:\n break", "def reset():\n if os.name == \"posix\": #In linux\n os.system(\"clear\")\n elif os.name == (\"ce\", \"nt\", \"dos\"): #In windows\n os.system(\"cls\")", "def reset(self):\n self.reset_dev_via_serial(self.forced_reset_timeout)", "def reset(self):\n\n ## Turn off controller to bring to a known state\n try:\n self.logger.info(\"Turning off sta3800 controller (sta3800_off).\")\n ccdsetup.sta3800_off()\n except Exception:\n self.logger.exception(\"Unable to turn off controller! State may be unknown.\")\n raise\n else:\n self.logger.info(\"Controller turned off successfully.\")\n\n ## Initialize controller\n try:\n self.logger.info(\"Turning on sta3800 controller (sta3800_setup).\")\n ccdsetup.sta3800_setup()\n except Exception:\n self.logger.exception(\"Unable to turn on sta3800 controller!\")\n raise\n else:\n self.logger.info(\"Controller turned on successfully.\")", "def resetToMainSection(self):\n wValue = 0\n wIndex = 0\n wLength = 0\n try:\n self.__bootCommand(op.BootloaderCommands.Reset,1,[0,0,0],[])\n except:\n #This will always throw an exception because it disconnects the device and re-enumerates as a normal Power Monitor\n print(\"Resetting to Main Section.\")", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def soft_reset(self):\n self.ser.write(\"\\030\")\n self._handle_reset()", "def SetOperateMode(self):\n handler = self.get_command_object(\"SetOperateMode\")\n handler()", "def reset_device_bridge(self, client, device_type):\r\n client.resetDeviceBridgeOS(device_type)", "def reset(self, pure=True, **kwargs):\n self.circuit.reset(self._init_modes)", "def reset(self):\n return self.set_command(\"Z\")", "def reset(self):\n self.write_to_serial('*RST')", "def change_mode(self):\n master.destroy()\n os.system(\"add_mode_run.py\")", "async def reset(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.LEFT))", "def soft_reset():", "def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode", "def reset(cls):\n\n cls._set_mode_stopped()\n TimeDisplay.reset_time(erase=True)\n TimeDisplay.show_default()\n Notes.clear()\n for callback in cls.reset_callback:\n callback()", "def set_manual_mode(self):\n self._kernel.set_manual_mode()", "async def reset(self):\n await self.set_param(\"ContinuousExposures\", 0)\n await self.set_param(\"Exposures\", 0)\n cmd = await self.send_command(\"RESETTIMING\", timeout=1)\n if not cmd.succeeded():\n self.status = ControllerStatus.ERROR\n raise ArchonError(f\"Failed sending RESETTIMING ({cmd.status.name})\")\n\n # TODO: here we should do some more checks before we say it's IDLE.\n self.status = ControllerStatus.IDLE", "def _doReset(self):\n 
self._cmdReset()", "def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")", "def reset(self):\r\r\n self.read(\"*cls\")\r\r\n self.waitForCompletion()\r\r\n self.read(\"*RST\") # Reset and query\r\r\n self.dev.write(\"*cls\")\r\r\n while self.read(\"*OPC?\") != \"1\": time.sleep(1) # Wait until completion\r\r", "def reset(self):\n self._write(0x16, 1, 3, 0x08)", "def setOff(self, command):\r\n self.setDriver('ST', 0)", "def resetSimulator():\n\tif settings._telnet == True:\n\t\toutput('Resetting simulator...')\n\t\tsettings.obj = []\n\t\tsendData('RESET', read=True, flush=True)\n\t\t\n\t\ttry:\n\t\t\tsettings._tn.close()\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\tsettings._tn = None\n\t\tsettings._telnet = False\n\t\ttime.sleep(5)\n\t\t\n\t\toutput('Reconnecting...')\n\t\tinitTelnet(settings.ip, settings.port, retries=10)\n\telse:\n\t\traise 'No active telnet connection!'", "def reset(self):\n\t\tself.write(\"*rst\")\n\t\tpass", "def _reset(self):\n self._interface.set('fw_wp_en', 'off')" ]
[ "0.65417933", "0.6428434", "0.637627", "0.63660645", "0.6290326", "0.62352115", "0.6198992", "0.6187783", "0.6177051", "0.61114603", "0.60926646", "0.6088796", "0.60403967", "0.59939355", "0.5975111", "0.596282", "0.59573853", "0.594717", "0.59382457", "0.5926726", "0.5913849", "0.59099424", "0.59079784", "0.5886515", "0.58807737", "0.58475876", "0.5844898", "0.5842527", "0.5838206", "0.5835471" ]
0.7390385
0
Return the Zone's current preset mode, e.g., home, away, temp.
def preset_mode(self) -> str | None: if self._device.tcs.system_mode is None: return # unable to determine # if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in MODE_TCS_TO_HA: if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in ( SystemMode.AWAY, SystemMode.HEAT_OFF, ): return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]] if self._device.mode is None: return # unable to determine if self._device.mode[CONF_MODE] == ZoneMode.SCHEDULE: return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]] return PRESET_ZONE_TO_HA.get(self._device.mode[CONF_MODE])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preset_mode(self) -> str | None:\n state = self._state\n return state.custom_preset or _PRESETS.from_esphome(\n state.preset_compat(self._api_version)\n )", "def preset_mode(self):\n return self._preset_mode", "def preset_mode(self):\n dps_mode = self._device.get_property(PROPERTY_TO_DPS_ID[ATTR_PRESET_MODE])\n if dps_mode is not None:\n return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode)\n else:\n return None", "def preset_mode(self) -> Optional[str]:\n return self._preset", "def preset_mode(self):\n if self._away:\n return PRESET_AWAY\n return PRESET_HOME", "def preset_mode(self) -> str:\n # NOTE: fan speeds are not really presets...the only presets LUNOS has is vent mode\n return self._vent_mode", "def preset_mode(self):\n return PRESET_AWAY if self._is_away else PRESET_NONE", "def preset_modes(self):\n return [PRESET_NONE, PRESET_AWAY] if self._away_temp else PRESET_NONE", "def preset_mode(self) -> str | None:\n\n if self._device.system_mode is None:\n return # unable to determine\n return PRESET_TCS_TO_HA[self._device.system_mode[CONF_SYSTEM_MODE]]", "def preset_modes(self):\n return self._preset_modes", "def preset_modes(self):\n return [PRESET_NONE, PRESET_AWAY, HOLD_MODE_TEMPERATURE]", "def mode(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.MODE, self._SW_VER)]", "def set_preset_mode(self, preset_mode: str | None) -> None:\n self.svc_set_zone_mode(\n mode=PRESET_TO_ZONE.get(preset_mode),\n setpoint=self.target_temperature if preset_mode == \"permanent\" else None,\n )", "def currentMode(self):\n logger.debug(\"Func: currentMode/getter\")\n\n return self._currentsDict[\"currentMode\"]", "def preset_mode(self) -> Optional[str]:\n if self._device.fan_whoosh_mode:\n return PRESET_MODE_WHOOSH\n return None", "def preset_modes(self):\n return SUPPORT_PRESET", "def mode(self) -> str:\n return pulumi.get(self, \"mode\")", "def mode(self) -> str:\n return pulumi.get(self, \"mode\")", "def preset_modes(self):\n return list(PRESET_MODE_TO_DPS_MODE.keys())", "def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")", "def preset_modes(self) -> list:\n return self._preset_modes", "def current_option(self) -> str:\n return self.coordinator.data.settings.lamp_mode.name.lower()", "def preset_modes(self) -> Optional[List[str]]:\n return SUPPORT_PRESET", "def get_mode(self):\r\n return self._api.get_mode()", "def get_mode(self):\r\n return self.mode", "def get_current_preset(self):\n if self._legacy_anna:\n active_rule = self._domain_objects.find(\"rule[active='true']/directives/when/then\")\n if active_rule is None or \"icon\" not in active_rule.keys():\n return \"none\"\n return active_rule.attrib[\"icon\"]\n\n log_type = \"preset_state\"\n locator = (\n \"appliance[type='thermostat']/logs/point_log[type='\"\n + log_type\n + \"']/period/measurement\"\n )\n return self._domain_objects.find(locator).text", "def preset_modes(self) -> list[str]:\n # Use the Vallox profile names for the preset names.\n return list(STR_TO_VALLOX_PROFILE_SETTABLE.keys())", "def current_option(self) -> str | None:\n # If the translation key is \"zone_sleep\", we need to translate\n # the value to make it compatible with Home Assistant\n if (\n value := self.capability.current\n ) is not None and self.translation_key == \"zone_sleep\":\n return 
ZONE_SLEEP_STATE_MAPPING[value]\n\n return value", "def getmode(self):\n return self.mode" ]
[ "0.74566215", "0.74438035", "0.71478873", "0.70889574", "0.70163906", "0.6992599", "0.69559926", "0.6906426", "0.68646806", "0.6750974", "0.6643373", "0.6543945", "0.6502334", "0.6471802", "0.6455682", "0.6405507", "0.6283329", "0.6283329", "0.62627965", "0.62500966", "0.62500966", "0.6249896", "0.62250096", "0.6207007", "0.6166562", "0.6101231", "0.60647964", "0.6063407", "0.6062076", "0.60400176" ]
0.75515884
0
Fake the measured temperature of the Zone sensor.
def svc_put_zone_temp( self, temperature: float, **kwargs ) -> None: # set_current_temp self._device.sensor._make_fake() self._device.sensor.temperature = temperature self._device._get_temp() self.update_ha_state()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def temperature() -> float:", "def get_temperature(self):\n # Fake a random temperature change\n temperature = random.randint(20, 25)\n self.set_temperature(temperature)", "def get_temperature(self):\n pass", "def test_sensor_temperature_fahrenheit(self):\n with patch.dict(TYPES, {'TemperatureSensor': self.mock_type}):\n state = State('sensor.temperature', '74',\n {ATTR_UNIT_OF_MEASUREMENT: TEMP_FAHRENHEIT})\n get_accessory(None, state, 2, {})", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def set_temperature(self, temperature: float = None, **kwargs) -> None:\n self.svc_set_zone_mode(setpoint=temperature)", "def target_temperature(self) -> float:\n return self._thermostat.setpoint_temperature", "def target_temperature(self) -> float | None:\n\n zones = [z for z in self._device.zones if z.setpoint is not None]\n temps = [z.setpoint for z in zones if z.heat_demand is not None]\n return max(z.setpoint for z in zones) if temps else None\n\n # temps = [z.setpoint for z in self._device.zones]\n # return round(sum(temps) / len(temps), 1) if temps else None", "def getTemperature(self):\n return self.temperature", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def give_temperature(self, value):\n self.temperature = value", "def temperature_unit(self):\n return TEMP_FAHRENHEIT", "def test_sensor_temperature_celsius(self):\n with patch.dict(TYPES, {'TemperatureSensor': self.mock_type}):\n state = State('sensor.temperature', '23',\n {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS})\n get_accessory(None, state, 2, {})", "def test_sensor_temperature(self):\n with patch.dict(TYPES, {'TemperatureSensor': self.mock_type}):\n state = State('sensor.temperature', '23',\n {ATTR_DEVICE_CLASS: 'temperature'})\n get_accessory(None, state, 2, {})", "def test_temperature_same_unit(self):\n self.assertEqual(\n 5,\n METRIC_SYSTEM.temperature(5,\n METRIC_SYSTEM.temperature_unit))", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def temperatures():\n\n return station_9281", "def current_temperature(self) -> float | None:\n temps = [z.temperature for z in self._device.zones if z.temperature is not None]\n temps = [t for t in temps if t is not None] # above is buggy, why?\n try:\n return round(sum(temps) / len(temps), 1) if temps else None\n except TypeError:\n _LOGGER.error(f\"temp ({temps}) contains None\")", "def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)", "def writetemperature(self):\r\n\t\tTEMP_CONFIG = (SI7015_REG_CONFIG_CNVRSN_ON | SI7015_REG_CONFIG_TEMP)\r\n\t\tbus.write_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_CONFIG, TEMP_CONFIG)", "def temperature_effect(self, location):\n return self.__temperature_effect_impl(location)", "def test_temperatures_value(self):\n self.assertEqual(self.TminValue, 450.0)", "def target_temperature(self) -> float | None:\n return self._device.setpoint", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def test_take_temperature_readings0002(self, platform, monkeypatch):\n\n platform.temp_sensors = [\n \"hw.sensors.cpu0.temp0\",\n \"hw.sensors.acpitz0.temp0\",\n ]\n\n def fake__raw_read_temperature_sensor(self, sensor):\n if sensor 
== \"hw.sensors.cpu0.temp0\":\n return \"hw.sensors.cpu0.temp0=64.00 degC\"\n elif sensor == \"hw.sensors.acpitz0.temp0\":\n return \"hw.sensors.acpitz0.temp0=65.58 degC (zone temperature)\"\n else:\n assert False\n\n monkeypatch.setattr(krun.platform.OpenBSDPlatform,\n \"_raw_read_temperature_sensor\",\n fake__raw_read_temperature_sensor)\n\n # Results were already in degrees C\n expect = {\n \"hw.sensors.cpu0.temp0\": 64.00,\n \"hw.sensors.acpitz0.temp0\": 65.58,\n }\n got = platform.take_temperature_readings()\n\n assert expect == got", "def temperature(self):\n return self._temperature", "def temperature(self):\n return self._temperature", "def test_temperatures(get_touchmat):\n touchmat = get_touchmat\n\n temperatures = touchmat.temperatures()\n info = touchmat.info()\n check_system_types.check_TemperatureInfoList(temperatures, [info])" ]
[ "0.70572984", "0.6964777", "0.68520457", "0.65681976", "0.6562205", "0.64241844", "0.6369297", "0.6343626", "0.6342289", "0.6325718", "0.63102293", "0.6304554", "0.62683314", "0.6252575", "0.62433594", "0.62384284", "0.6211073", "0.6199095", "0.61890876", "0.6184514", "0.6183885", "0.6168048", "0.616687", "0.61551356", "0.61443347", "0.61394876", "0.61203665", "0.6108162", "0.6108162", "0.6089693" ]
0.7113436
0
Reset the configuration of the Zone.
def svc_reset_zone_config(self) -> None: self._call_client_api(self._device.reset_config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svc_reset_zone_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)", "def reset_config():\r\n # TODO implement configuration reset\r\n pass", "def reset( self ):\n self.conf = self.defaults", "def tearDown(self):\n updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()\n updateConfigurationCmd.name = \"use.external.dns\"\n updateConfigurationCmd.value = \"false\"\n updateConfigurationCmd.scopename = \"zone\"\n updateConfigurationCmd.scopeid = 1\n self.apiClient.updateConfiguration(updateConfigurationCmd)", "def reset_config():\n return _set_config(_gen_config())", "def reset_zone_overlay(self, zone_id):\n self.tado.resetZoneOverlay(zone_id)\n self.update_sensor(\"zone\", zone_id)", "def reset(self) -> None:\n\n self.host.reset()", "def reset(self, config, **kwargs):\n pass", "def clearDropzones( self ):\n self._dropzones = []", "def reset_config():\n\n Config.config().update({\"coerce\": True, \"debug\": True, \"active\": True})", "def reset(self):\n self._config = Config()\n self._router = Router(())\n self._middleware = []\n self._start_response = None", "def reset(self):\n self.at_cmd('Z')", "def svc_set_zone_config(self, **kwargs) -> None:\n self._call_client_api(self._device.set_config, **kwargs)", "def deconfigure(self):\n\n pass", "def antenny_config_reset(self):\n return self.antenny_config.reset_default_config()", "def deconfigure(self):\n\n self.platform.deconfigure()", "def resetConfiguration(self):\n exec(config.loadConfiguration(\"console.cfg\").read())", "def _reset(cls):\r\n cls._CONFIGURED = False\r\n cls._ENABLED = {}", "async def reset_config(self):\n self.config = {\"enable_auto_gen\": False, \"enable_world_barrier\": False}\n await shared.event_handler.call_async(\"world:reset_config\")\n self.gamerule_handler = mcpython.common.world.GameRule.GameRuleHandler(self)", "def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)", "def reset(self):\n self.velocity_controller.reset()\n self.yaw_filter.reset()", "def reset(self):\n self.params.resetParams()", "def reset(self):\n self.logger.debug(\"Resetting...\")\n pass", "def reset():\n bwc = BandwidthConfigurator()\n bwc.reset()", "async def admin_reset(self, ctx: commands.Context):\n await self.config.clear_all()\n await self.initialize_internals()\n await ctx.send('Global team management factory reset complete.')", "def reset(self):\n \n pass", "def restore_config(self):\n self._clear_previous_windows_assigment()\n self._restart_i3_config()", "def reset(self):\n self._set_init()", "def reset_stations(self):\n # XXX currently everyone shares the default MAC\n mac = g.DEFAULT_MAC\n stations = model.Session.query(model.Station)\n station = stations.filter(model.Station.mac == mac).one()\n station.clone(model.Station())\n model.Session.update(station)\n model.Session.commit()\n\n # reset all config archives\n for type in ['station', 'user']:\n path = h.get_config_dir_for(type)\n src = os.path.join(path, '..', g.FACTORY_CONFIG)\n for f in os.listdir(path):\n if f.endswith('.tar.gz'):\n dst = os.path.join(path, f)\n log.debug('%s -> %s' % (src, dst))\n shutil.copyfile(src, dst)\n\n redirect_to('/admin/dashboard')", "def reset(self):\n requests.put('{}/reset'.format(self._get_url()))" ]
[ "0.7075728", "0.67781687", "0.6710553", "0.6671109", "0.6439973", "0.6333401", "0.6261008", "0.62493366", "0.62182164", "0.62151074", "0.6211837", "0.61439663", "0.6118708", "0.61135024", "0.610688", "0.60491854", "0.6011505", "0.6008325", "0.5952107", "0.5927481", "0.58829653", "0.58324224", "0.5826606", "0.58218694", "0.580276", "0.5788006", "0.57863957", "0.5756325", "0.57399577", "0.5738121" ]
0.84243864
0
Reset the (native) operating mode of the Zone.
def svc_reset_zone_mode(self) -> None: self._call_client_api(self._device.reset_mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svc_reset_system_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)", "def svc_reset_zone_config(self) -> None:\n self._call_client_api(self._device.reset_config)", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def reset(self):\n self.at_cmd('Z')", "def reset(self):\n return self.set_command(\"Z\")", "def resetDeviceStates(self):", "def _reset(self):\n self._interface.set('fw_wp_en', 'off')", "def SetOperateMode(self):\n handler = self.get_command_object(\"SetOperateMode\")\n handler()", "def reset():\n if os.name == \"posix\": #In linux\n os.system(\"clear\")\n elif os.name == (\"ce\", \"nt\", \"dos\"): #In windows\n os.system(\"cls\")", "def svc_set_zone_mode(\n self, mode=None, setpoint=None, duration=None, until=None\n ) -> None:\n if until is None and duration is not None:\n until = dt.now() + duration\n self._call_client_api(\n self._device.set_mode, mode=mode, setpoint=setpoint, until=until\n )", "def turn_off(self) -> None:\n self._monoprice.set_power(self._zone_id, False)", "def change_mode(self):\n master.destroy()\n os.system(\"add_mode_run.py\")", "def reset(cls):\n\n cls._set_mode_stopped()\n TimeDisplay.reset_time(erase=True)\n TimeDisplay.show_default()\n Notes.clear()\n for callback in cls.reset_callback:\n callback()", "def set_zone_off(self, zone_id, overlay_mode, device_type=\"HEATING\"):\n try:\n self.tado.setZoneOverlay(\n zone_id, overlay_mode, None, None, device_type, \"OFF\"\n )\n except urllib.error.HTTPError as exc:\n _LOGGER.error(\"Could not set zone overlay: %s\", exc.read())\n\n self.update_sensor(\"zone\", zone_id)", "def setOff(self, command):\r\n self.setDriver('ST', 0)", "def reset(self):\n # The camera will give no response to this command\n self._serial_io('\\x55\\x99\\x66\\x11', None)\n while True:\n try:\n self.system_state = 0x11\n if self.system_state == 0x11:\n break\n except CygnetExc:\n time.sleep(.2)\n while True:\n self.system_state = 0x12\n time.sleep(.2)\n if self.system_state == 0x16:\n break", "def set_drive_mode(mode):", "def reset(self):\n self._write(0x16, 1, 3, 0x08)", "async def async_turn_off(self, **kwargs):\n self._wrap_device.device.set_duct_zone(self._zone, False)", "def _mask_mode(self):\r\n self._mode_select(0)", "def full_reset(self):\n self.at_cmd('CFUN=1')", "def soft_reset():", "def reset_use_case(self, save: bool=None):\n self.pm.reset_use_case()\n self.pm_persist(save)", "def reset(self):\n self.reset_dev_via_serial(self.forced_reset_timeout)", "def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode", "def reset(self):\n self._position = TwoDV(0.0, 0.0)\n self._orient = TNavigator.START_ORIENTATION[self._mode]", "def modes_off(self):\n bm = self.fitsimage.get_bindmap()\n bm.reset_mode(self.fitsimage)", "def set_offline(self, tzone):\n\t\t\n\t\tif tzone.startswith(\"Other/\"):\n\t\t\ttzone.replace(\"Other/\",\"\")\n\t\t\n\t\twith open(\"/etc/timezone\", \"w\") as f:\n\t\t\tf.write(tzone + \"\\n\")\n\t\t\t\t\n\t\tif os.path.exists(\"/etc/localtime\"):\n\t\t\tos.remove(\"/etc/localtime\")\n\t\tshutil.copy2(\"/usr/share/zoneinfo/%s\" % (tzone),\"/etc/localtime\")", "def svc_set_system_mode(self, mode, period=None, days=None) -> None:\n if period is not None:\n until = dt.now() + period\n elif days is not None:\n until = dt.now() + days # TODO: round down\n else:\n until = None\n 
self._call_client_api(self._device.set_mode, system_mode=mode, until=until)", "def reset():\n _runtime.reset()" ]
[ "0.7032803", "0.6246101", "0.61618865", "0.61210054", "0.5992715", "0.59177274", "0.58946455", "0.58787835", "0.587655", "0.5838272", "0.5831863", "0.58193153", "0.5774155", "0.56983584", "0.5684187", "0.5671345", "0.5668101", "0.56679434", "0.5648671", "0.5634961", "0.563169", "0.56037337", "0.55789477", "0.557478", "0.5572002", "0.555355", "0.55416375", "0.5538004", "0.5531209", "0.5530324" ]
0.7732747
0
Set the configuration of the Zone (min/max temp, etc.).
def svc_set_zone_config(self, **kwargs) -> None: self._call_client_api(self._device.set_config, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zone(self, zone: str):\n\n self._zone = zone", "def set_temperature(self, temperature: float = None, **kwargs) -> None:\n self.svc_set_zone_mode(setpoint=temperature)", "def svc_reset_zone_config(self) -> None:\n self._call_client_api(self._device.reset_config)", "def zones(self, zones):\n\n self._zones = zones", "def zones(self, zones):\n\n self._zones = zones", "def pzone_set_one_zone_one_param(self, p_zone, param, value):\n\tcommand = '<' + str(p_zone) + str(param) + str(value) + '\\r\\n'\n try:\n self._ser.write(command)\n self.log.debug(u\"= = = > Command {0} sent to the amp\".format(command.rstrip()))\n except:\n error = \"Error while polling device : {}\".format(self.device)\n raise Mprsg6zException(error)\n\n\t# update _pzones with result\n\tself._pzones[p_zone][param] = value", "def svc_set_zone_mode(\n self, mode=None, setpoint=None, duration=None, until=None\n ) -> None:\n if until is None and duration is not None:\n until = dt.now() + duration\n self._call_client_api(\n self._device.set_mode, mode=mode, setpoint=setpoint, until=until\n )", "def config_connect(self):\n self._config.connect('geography.maximum_meeting_zone',\n self.cb_update_meeting_radius)", "def __init__(self, easting=0.0, northing=0.0, altitude=0.0,\n zone_number=None, zone_letter=None):\n self.easting = easting\n self.northing = northing\n self.altitude = altitude\n self.zone_number = zone_number\n self.zone_letter = zone_letter", "def set_min_max(self, xmin, xmax, ymin, ymax, zmin, zmax):\n\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n self.zmin = zmin\n self.zmax = zmax", "def set(self, tzone):\n\t\t\n\t\tif self.no_dbus: return\n\t\t\n\t\tself.TimeZone.SetTimezone(\n\t\t\t'(sb)',\n\t\t\ttzone,\n\t\t\tTrue # User interaction\n\t\t)", "def zone_topo(self, zone_topo):\n\n self._zone_topo = zone_topo", "def set_preset_mode(self, preset_mode: str | None) -> None:\n self.svc_set_zone_mode(\n mode=PRESET_TO_ZONE.get(preset_mode),\n setpoint=self.target_temperature if preset_mode == \"permanent\" else None,\n )", "def zone(self, zone):\n if self._bundle:\n self._bundle.check_zone(zone)\n self._zone = zone", "def set_zonepath(self, path):\n self.set_attr(ZONE_ENTRY['ZROOT'], path)", "def set_temperatures_and_fit(self, curr_zone_temperatures, interval, now):\n pass", "def __init__(self, zoneType, costMod):\r\n self.zoneType = zoneType\r\n self.costMod = costMod", "def svc_put_zone_temp(\n self, temperature: float, **kwargs\n ) -> None: # set_current_temp\n self._device.sensor._make_fake()\n self._device.sensor.temperature = temperature\n self._device._get_temp()\n self.update_ha_state()", "def set_timezone(tz=None, deploy=False):\n\n if not tz:\n raise CommandExecutionError(\"Timezone name option must not be none.\")\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/timezone\",\n \"element\": \"<timezone>{}</timezone>\".format(tz),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def setDefaults(self) -> None:\n self.night_boundary = -12.0\n self.new_moon_phase_threshold = 20.0", "def set_zone(self, zone_type):\n if not self.can_place(ConstructType.FAMILY_HOUSE):\n return False\n\n self.zone_type = zone_type\n if zone_type == 'residential':\n self.construct = Construct(ConstructType.FAMILY_HOUSE)\n elif zone_type == 'commercial':\n self.construct = Construct(ConstructType.SHOP)\n elif zone_type == 
'industrial':\n self.construct = Construct(ConstructType.FACTORY)\n return True", "def assign_zone_veh(self, t, warmup_phase, penalty, operator):\n for z in self.zones:\n z.assign(self.zones, t, warmup_phase, penalty, operator)", "def setZoneRecords(self, records):\n self._dump_data['zone'] = records", "def initConfig(self):\n\n # set observer position to last one first, to greenwich if not known\n lat = self.config.get('topoLat', 51.47)\n lon = self.config.get('topoLon', 0)\n elev = self.config.get('topoElev', 46)\n topo = skyfield.api.Topos(longitude_degrees=lon,\n latitude_degrees=lat,\n elevation_m=elev)\n\n config = self.config.get('mainW', {})\n if config.get('loglevelDeepDebug', True):\n level = 'DEBUG'\n elif config.get('loglevelDebug', True):\n level = 'INFO'\n else:\n level = 'WARN'\n setCustomLoggingLevel(level)\n\n return topo", "def setUTC(self, flag):\n try:\n\n adjtimeFile = \"/etc/adjtime\"\n if self.__mountDir:\n adjtimeFile = self.__mountDir + adjtimeFile\n\n fd = open(adjtimeFile)\n content = fd.read()\n fd.close()\n\n newContent = content\n\n if flag and not \"UTC\" in content:\n if \"LOCAL\" in content:\n newContent = re.sub(\"LOCAL\", \"UTC\", content)\n else:\n newContent += \"UTC\\n\"\n elif not \"LOCAL\" in content:\n if \"UTC\" in content:\n newContent = re.sub(\"UTC\", \"LOCAL\", content)\n else:\n newContent += \"LOCAL\\n\"\n\n fd = open(adjtimeFile, \"w\")\n fd.write(newContent)\n fd.close()\n except Exception as e:\n self.__logger.critical(\"Failed to write UTC configuration\")\n raise ZKVMError(\"POSTINSTALL\", \"TIMEZONE\", \"UTC_CONF\")", "def __init__(self, **kwargs):\n\t\tself.__c_version = c_singlezone(**kwargs)", "def switch_availability_zone():\n global current_az\n if current_az == 0:\n current_az = 1\n else:\n current_az = 0", "def configure_wifi(self, ssid, password, uid=0, timezone=None):\n extra_params = {}\n if timezone is not None:\n now = datetime.datetime.now(pytz.timezone(timezone))\n offset_as_float = now.utcoffset().total_seconds() / 60 / 60\n extra_params[\"tz\"] = timezone\n extra_params[\"gmt_offset\"] = offset_as_float\n\n return super().configure_wifi(ssid, password, uid, extra_params)", "def __init__(self, alt=0, temp_offset=0):\n\t\tWorkingAtmosphere.__init__(self, alt)\n\t\t#self.temperature_offset = tOffset\n\t\tself.Temperature_offset = temp_offset\n\t\tself.make_environment()", "async def svc_set_zone_schedule(self, schedule: str, **kwargs) -> None:\n await self._device.set_schedule(json.loads(schedule))" ]
[ "0.61640877", "0.590769", "0.59051955", "0.58893234", "0.58893234", "0.58371204", "0.5772537", "0.5691023", "0.56861216", "0.56284785", "0.56142485", "0.5595346", "0.5559072", "0.5520208", "0.54665196", "0.5450415", "0.54297817", "0.54261005", "0.5416261", "0.5361676", "0.53615856", "0.5358542", "0.5321582", "0.5318884", "0.52999043", "0.5296864", "0.52681124", "0.5266614", "0.5259973", "0.52484226" ]
0.7082057
0
Return Position of p's subtree having key k, or last node searched
def _subtree_search(self, p, k): if k == p.key(): # found match return p elif k < p.key(): # search left subtree if self.left(p) is not None: return self._subtree_search(self.left(p), k) else: # search right subtree if self.right(p) is not None: return self._subtree_search(self.right(p), k) return p # unsucessful search
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _subtree_search(self, p, k):\n if k == p.key():\n return p\n elif k < p.key():\n if self.left(p) is not None:\n return self._subtree_search(self.left(p), k)\n else:\n if self.right(p) is not None:\n return self._subtree_search(self.right(p), k)\n #unsuccesful search and return the last position searched\n return p", "def find(self, k):\n if k == self.key:\n return self\n elif k < self.key:\n if self.left is None:\n return None\n else:\n return self.left.find(k)\n else:\n if self.right is None: \n return None\n else:\n return self.right.find(k)", "def find_pos(self, _node, _key):\n for i, key in enumerate(_node.keys):\n if _key < key:\n return i\n \n return len(_node.pt)-1", "def search(self, key):\r\n (node, index) = self.root, self.root.search(key)\r\n while not node.contains_key_at(key, index) and not node.is_leaf():\r\n node = node.children[index]\r\n index = node.search(key)\r\n\r\n return (node, index) if node.contains_key_at(key, index) else None", "def find_ge(self, k):\n if self.is_empty():\n return None\n else:\n p = self.find_position(k)\n if p.key() < k:\n p = self.after(p)\n if p is not None:\n return (p.key(), p.value())\n else: \n return None", "def _query(self, p, k):\n if isinstance(p, int):\n if k >= len(self._table[p]):\n return None\n return self._table[p][k]\n\n # if k > self._tree.depth(p):\n if k >= len(self._table[p.index()]):\n return None\n return self._table[p.index()][k]", "def search(self, key):\n x = self.root\n\n while x is not self.nil:\n if key == x.key:\n break\n\n if key < x.key:\n x = x.left\n else:\n x = x.right\n return x", "def find(self, k):\n return self.root and self.root.find(k)", "def find(self, k):\n return self.root and self.root.find(k)", "def _query(self, p, k):\n if k > self._tree.depth(p):\n return None\n\n if k == 0:\n return p\n\n # Find the jump-node descendant of the node and recompute the query level.\n jump = self._jump[p.index()]\n k = k + self._tree.depth(jump) - self._tree.depth(p)\n\n l = self._log[k] # k = 2^l + d\n d = k - self._pow[l]\n\n u = self._table[jump.index()][l]\n w = self._ladders[self._path[u.index()]][self._ind[u.index()] - d]\n\n return w", "def get_position(self, key):\n return bisect.bisect_left(self.keys, key)", "def search(self, key: int, possible_parent=False) -> TreeNode:\n node = prev_node = self.root\n while node:\n if key > node.val:\n prev_node = node\n node = node.right\n elif key == node.val:\n return node\n else:\n prev_node = node\n node = node.left\n if possible_parent:\n return prev_node\n return None", "def _get_node_pos(self, key):\n if not self._hashring:\n return\n\n k = md5_bytes(key)\n key = (k[3] << 24) | (k[2] << 16) | (k[1] << 8) | k[0]\n\n nodes = self._sorted_keys\n pos = bisect(nodes, key)\n\n if pos == len(nodes):\n return 0\n return pos", "def search(T,k):\r\n for t in T.data:\r\n if k == t.word:\r\n return t\r\n if T.isLeaf:\r\n return None\r\n return search(T.child[findChildB(T,k)],k)", "def search(self, key):\r\n left = 0 \r\n right = self.num_keys()\r\n while right > left:\r\n mid = (left + right)//2\r\n if self.keys[mid] >= key:\r\n right = mid\r\n else:\r\n left = mid + 1\r\n return left", "def find(self, key):\n if self.key == key:\n return self.item\n elif key > self.key:\n if self.right:\n return self.right.find(key)\n else:\n if self.left:\n return self.left.find(key)\n # Replace by correct code", "def _get(self, k, currNode):\n if not currNode:\n return\n if k < currNode.key:\n return self._get(k, currNode.leftChild)\n elif k > currNode.key:\n return self._get(k, 
currNode.rightChild)\n elif k == currNode.key:\n return currNode", "def find(self, p):\n if self.parent[p] != p: \n self.parent[p] = self.find(self.parent[p])\n return self.parent[p]", "def locate_predecessor(self, key):\r\n index = self.search(key)\r\n return index-1", "def get_position(k):\r\n l = get_level(k)\r\n return (l, k - 2**l)", "def find(self, key) -> Union[\"Node\", None]:\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left # traverse left\n\n elif key > current.key:\n current = current.right # traverse right\n\n if current is None: # failure\n break\n return current", "def _findPosition(self, key):\n for i in range(len(self._entryList)):\n if self._entryList[i].key == key:\n return i\n return None", "def findChildA(T,k): \r\n for i in range(len(T.data)):\r\n if k.word < T.data[i].word:\r\n return i\r\n return len(T.data)", "def search(self, key):\n if self.key == key:\n if self.val is not None:\n return self.val\n else:\n return self.key\n\n \"\"\"If the key of the node is smaller than the root node's key, traverse the left subtree\"\"\"\n if self.key < key:\n self.left.search(key)\n\n \"\"\"If the key of the node is greater than the root node's key, traverse the right subtree \"\"\"\n if self.key > key:\n self.right.search(key)\n\n \"\"\"If tree is empty, return None\"\"\"\n return None", "def __getitem__(self, k):\n if self.is_empty():\n raise KeyError('key Error:' + repr(k))\n else:\n p = self._subtree_search(self.root(), k)\n self._rebalance_access(p)\n #this might be an unsuccessful search, so deal with this...\n if k!=p.key():\n raise KeyError('key error:'+repr(k))\n return p.value()", "def search(root, key):\n if root is None:\n return None\n else:\n if root.key == key:\n return root.value\n elif root.right is None and root.left is None:\n return None\n elif key >= root.key:\n return search(root.right, key)\n # No need to return root.right.value, since this should be\n # returned by root.key as root is replaced by root.right\n elif key < root.key:\n return search(root.left, key)\n # No need to return root.right.value, since this should be\n # returned by root.key as root is replaced by root.right", "def _kth_to_last_recursive(self, head, k):\n if head is None:\n return None, 0 \n node, index = self._kth_to_last_recursive(head.next_node, k) \n index += 1 \n if index == k:\n return head, index\n return node, index", "def find_leaf(self, _key):\n cur_node = self.root\n while type(cur_node) is not leaf:\n\n flag = True\n for i, key in enumerate(cur_node.keys):\n if key > _key:\n cur_node = cur_node.pt[i]\n flag = False\n break\n \n # the value passed in is greater than all the keys in this node\n if flag:\n cur_node = cur_node.pt[-1]\n \n return cur_node", "def findChildB(T,k):\r\n for i in range(len(T.data)):\r\n if k < T.data[i].word:\r\n return i\r\n return len(T.data)", "def successor(self, key: int) -> TreeNode:\n tree_node = self.search(key, possible_parent=True)\n if tree_node:\n if tree_node.right and tree_node.val <= key:\n right_subtree = tree_node.right\n while right_subtree.left:\n right_subtree = right_subtree.left\n return right_subtree\n else:\n while tree_node:\n if tree_node.val > key:\n return tree_node\n tree_node = tree_node.parent\n return" ]
[ "0.85987526", "0.7235625", "0.71664894", "0.70371085", "0.70218265", "0.6801036", "0.67528373", "0.6744712", "0.6744712", "0.6713811", "0.6692726", "0.66674405", "0.66482687", "0.6632227", "0.6625981", "0.6618637", "0.65873015", "0.658536", "0.6555594", "0.65537417", "0.65483856", "0.65013784", "0.648744", "0.64867604", "0.6458155", "0.6448642", "0.64449626", "0.6434773", "0.63977855", "0.6394605" ]
0.80569565
1
Return Position of first item in subtree rooted at p
def _subtree_first_position(self, p): walk = p while self.left(walk) is not None: walk = self.left(walk) # keep walking left return walk
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _subtree_first_position(self, p):\n \"\"\"will be used by before()\"\"\"\n walk = p\n #recursivly walking to the left child until the left subtree has no child\n while self.left(walk) is not None:\n walk = self.left(walk)\n return walk", "def before(self, p):\n self._validate(p)\n # if there is a left subtree, then the first positiion of in subtree \n # rooted at the left(p) will be the immediate position before p\n if self.left(p) is not None:\n return self._subtree_first_position(self.left(p))\n # if there is no left substree, \n # the immediate smaller position will be the parent of the \"left turn\" position\n # when going upward. \n else: \n walk = p # if p is the root of the tree None will be returned\n above = self.parent(walk)\n # not None is the boundary for root node\n # walk == self.left(above) is to look for \"left turn\":\n # if walk != self.left(above), that means there is left turn\n while above is not None and walk==self.left(above):\n walk = above\n above = self.parent(walk)\n return above", "def before(self, p):\n self._validate(p) # inherited from linked bin tree\n if self.left(p):\n return self._subtree_last_position(self.left(p))\n else:\n walk = p # walk up tree\n above = self.parent(walk)\n while above is not None and walk == self.left(above):\n walk = above\n above = self.parent(walk)\n return above", "def find(self, p):\n if self.parent[p] != p: \n self.parent[p] = self.find(self.parent[p])\n return self.parent[p]", "def find(p):\n if p != parent[p]:\n parent[p] = find(parent[p])\n return parent[p]", "def find(self, p):\n\n # Find the root of the component/set\n root = p\n while root != self.id[root]:\n root = self.id[root]\n\n # Compress the path leading back to the root\n # This operation is called \"path compression\" and is\n # waht gives the amortized constant time complexity.\n while p != root:\n next = self.id[p]\n self.id[p] = root\n p = next\n\n return root", "def parent(self, p):\n node = self._validate_position(p)\n return self._make_position(node)", "def find(self, p):\n parent = self._parent\n while p != parent[p]:\n p = parent[p] # !!cannot apply path compression to this problem\n return p", "def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)", "def _subtree_search(self, p, k):\n if k == p.key():\n return p\n elif k < p.key():\n if self.left(p) is not None:\n return self._subtree_search(self.left(p), k)\n else:\n if self.right(p) is not None:\n return self._subtree_search(self.right(p), k)\n #unsuccesful search and return the last position searched\n return p", "def left(self, p):\n node = self._validate(p)\n return self._make_position(node._left)", "def parent(self,p):\n node = self._validate(p)\n return self._make_position(node._parent)", "def parent(self, p):\n node = self._validate(p)\n return self._make_position(node.parent)", "def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)", "def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)", "def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)", "def _subtree_last_position(self, p):\n walk = p\n while self.right(walk) is not None:\n walk = self.right(walk)\n return walk", "def position(element1, root=None):\n \n position = [] \n current = element1\n while (current.getparent() is not None) and (current is not root):\n parent = current.getparent()\n #find the index of current under parent\n index = 0\n for i in parent:\n if i is current: 
break\n index += 1\n position.insert(0, index + 1)\n current = parent\n \n position.insert(0, 1) # for the root element\n return position", "def first(self):\n return self._subtree_first_position(self.root()) if len(self) > 0 else None", "def find(self, p):\n self._validate(p)\n while p != self._parent[p]:\n self._parent[p] = self._parent[self._parent[p]] # path compression by halving\n p = self._parent[p]\n return p", "def left(self, p):\n node = self._validate(p)\n return self._make_position(node.left)", "def left(self, p):\n node = self._validate(p)\n return self._make_position(node._left)", "def left(self, p):\n node = self._validate(p)\n return self._make_position(node._left)", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def find(self, p):\n self._validate(p)\n return self.parents[p]", "def left(self, p):\n node = self._validate_position(p)\n return self._make_position(node.left)", "def depth(self, p):\n if self.is root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def treepos(self, tree):\n if tree is None:\n raise ValueError(\"Parse tree not available\")\n stack = [tree]\n treepos = []\n\n wordnum = 0\n while True:\n # tree node:\n if isinstance(stack[-1], Tree):\n # Select the next child.\n if len(treepos) < len(stack):\n treepos.append(0)\n else:\n treepos[-1] += 1\n # Update the stack.\n if treepos[-1] < len(stack[-1]):\n stack.append(stack[-1][treepos[-1]])\n else:\n # End of node's child list: pop up a level.\n stack.pop()\n treepos.pop()\n # word node:\n else:\n if wordnum == self.wordnum:\n return tuple(treepos[: len(treepos) - self.height - 1])\n else:\n wordnum += 1\n stack.pop()", "def get_curpos(self):\n for i in range(len(self.tree)):\n if self.path == self.tree[i][2]:\n return i\n else:\n return -1", "def _subtree_last_position(self, p):\n walk = p\n while self.right(walk) is not None:\n walk = self.right(walk) # keep walking right\n return walk" ]
[ "0.8037946", "0.7145369", "0.69503504", "0.68678206", "0.67657113", "0.6577142", "0.6530356", "0.6518154", "0.65114397", "0.6478126", "0.64278555", "0.64125717", "0.63804257", "0.6373036", "0.6373036", "0.6373036", "0.6370875", "0.6368855", "0.636215", "0.6319114", "0.63092345", "0.63051796", "0.63051796", "0.6281129", "0.6277585", "0.62649286", "0.62640166", "0.62549514", "0.62362576", "0.6227674" ]
0.7995807
1
Return Position of last item in subtree rooted at p
def _subtree_last_position(self, p): walk = p while self.right(walk) is not None: walk = self.right(walk) # keep walking right return walk
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _subtree_last_position(self, p):\n walk = p\n while self.right(walk) is not None:\n walk = self.right(walk)\n return walk", "def after(self, p):\n self._validate(p)\n if self.right(p):\n return self._subtree_first_position(self.right(p))", "def last(self):\n return self._subtree_last_position(self.root()) if len(self) > 0 else None", "def after(self, p):\n self._validate(p)\n # if there exists p's right child, successor is left most position\n # in p's right subtree\n if self.right(p) is not None:\n walk = self.right(p)\n while self.left(walk)is not None:\n walk = left(walk)\n return walk\n # successor is the parent of the \"right turn\" position \n # when going upward\n else:\n walk = p\n above = self.parent(walk)\n while above is not None and walk==self.right(above):\n walk = above\n above = self.parent(walk)\n return above", "def _subtree_first_position(self, p):\n walk = p\n while self.left(walk) is not None:\n walk = self.left(walk) # keep walking left\n return walk", "def last_position(self):\n return self.visited_positions[-1]", "def _subtree_first_position(self, p):\n \"\"\"will be used by before()\"\"\"\n walk = p\n #recursivly walking to the left child until the left subtree has no child\n while self.left(walk) is not None:\n walk = self.left(walk)\n return walk", "def find(self, p):\n if self.parent[p] != p: \n self.parent[p] = self.find(self.parent[p])\n return self.parent[p]", "def right(self, p):\n node = self._validate(p)\n return self._make_position(node._right)", "def before(self, p):\n self._validate(p) # inherited from linked bin tree\n if self.left(p):\n return self._subtree_last_position(self.left(p))\n else:\n walk = p # walk up tree\n above = self.parent(walk)\n while above is not None and walk == self.left(above):\n walk = above\n above = self.parent(walk)\n return above", "def _height1(self, p):\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def GetLastChild(self, item):\r\n\r\n children = item.GetChildren()\r\n return (len(children) == 0 and [None] or [children[-1]])[0]", "def _subtree_search(self, p, k):\n if k == p.key():\n return p\n elif k < p.key():\n if self.left(p) is not None:\n return self._subtree_search(self.left(p), k)\n else:\n if self.right(p) is not None:\n return self._subtree_search(self.right(p), k)\n #unsuccesful search and return the last position searched\n return p", "def right(self, p):\n node = self._validate(p)\n return self._make_position(node._right)", "def right(self, p):\n node = self._validate_position(p)\n return self._make_position(node.right)", "def right(self, p):\n node = self._validate(p)\n return self._make_position(node.right)", "def find(p):\n if p != parent[p]:\n parent[p] = find(parent[p])\n return parent[p]", "def p (self):\n\n return self.end - 1", "def _last_node(self):\n if self.trail[-1][1] is None or self.trail[-1][1].group():\n return self.trail[-1][0]\n else:\n return self.trail[-2][0]", "def right(self, p):\n\n node = self._validate(p)\n return self._make_position(node._right)", "def depth(self, p):\n if self.is root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def after(self,p):\r\n \r\n current = self.tail #test from the tail node\r\n \r\n if p == current: #if the tail node = p\r\n return 'null' #there cannot be a node after it\r\n \r\n while current !=p: #else keep cheking the elements until it reaches p\r\n current = current.prev\r\n return current.next.data #now current = p, so return the node after it\r", "def lastChild(self, parent):\r\n if 
len(parent):\r\n return parent[-1]\r\n else:\r\n return None", "def _height2(self, p): # time is linear in size of subtree\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def after(self, p):\n node = self._validate(p)\n return self._make_position(node._next)", "def _height2(self, p): # time is linear in size of subtree\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def particleLastHalo(particle, halo):\n for i in range(0, len(halo.fullParents)):\n if halo.fullParents[i].containsParticle(particle):\n return halo.fullParents[i]\n return -1", "def position_last(self):\n return self._position_last", "def last_pos(self):\n return self.locs[self.indices[-1], 2:4]" ]
[ "0.84641135", "0.74079204", "0.6871047", "0.67420864", "0.6579081", "0.6513731", "0.6434427", "0.63944095", "0.6347665", "0.63426125", "0.6296618", "0.6280108", "0.6273795", "0.6258798", "0.6256129", "0.62405384", "0.6210961", "0.62079275", "0.6204021", "0.6203857", "0.615324", "0.61434495", "0.61429965", "0.6125982", "0.61175185", "0.610878", "0.60823685", "0.60789007", "0.60584104", "0.60574824" ]
0.8339861
1
Return the first Position in the tree (or None if empty)
def first(self): return self._subtree_first_position(self.root()) if len(self) > 0 else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first(self) -> object:\n #binary search tree == empty\n if self.root is None:\n return None\n\n # return\n return self.root.value", "def get_first(self) -> object:\n if self.root is None: # If tree is empty\n return None\n\n return self.root.value # Returning root value", "def get_position(self, pos):\n element = self.head\n count = 1\n\n if pos == 1:\n return element\n elif pos > 1 and self.head:\n while count < pos:\n element = element.next\n count += 1\n if element is None:\n break\n return element # if (element is not None) else (f\"Position {pos} does not exist.\") # conditional expression\n # else:\n # return None\n return None", "def _subtree_first_position(self, p):\n \"\"\"will be used by before()\"\"\"\n walk = p\n #recursivly walking to the left child until the left subtree has no child\n while self.left(walk) is not None:\n walk = self.left(walk)\n return walk", "def _subtree_first_position(self, p):\n walk = p\n while self.left(walk) is not None:\n walk = self.left(walk) # keep walking left\n return walk", "def get_position(self, position):\n current = self.head\n index = 1\n \n if position < 1:\n return None\n elif position == 1:\n return current\n else:\n while index != position:\n if current.next:\n current = current.next\n else:\n return None\n index += 1\n return current", "def get_position(self):\n pos_or_org = self.position.to_object\n if pos_or_org is None:\n return None\n elif pos_or_org.portal_type == 'position':\n return pos_or_org\n else:\n return None", "def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data", "def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current", "def min(self):\n no = self.root\n if no:\n no = self.__search_node_min_dir(no)\n if no:\n return no.valor\n return None", "def min_child(self, index):\n if self.empty():\n return None\n if self._has_left(index):\n left = self._left(index)\n small_child = left\n if self._has_right(index):\n right = self._right(index)\n if self._data[right] < self._data[left]:\n small_child = right\n if self._data[right] == self._data[left]:\n small_child = right\n return small_child\n return None", "def get_curpos(self):\n for i in range(len(self.tree)):\n if self.path == self.tree[i][2]:\n return i\n else:\n return -1", "def peek_min(self):\n if self.root:\n return self.root.min().value\n raise ValueError(\"cannot perform peek_min on an empty tree\")", "def left_child(self, position):\n child = 2 * position + 1\n if child > len(self.table) - 1:\n return None\n return child", "def get_top_node(tree):\n if tree[2][0]=='X':\n return tree[2][3]\n elif type(tree[2][0])==list:\n return tree[2][0][3]\n else:\n print 'Error in get_top_node'\n return None", "def first(self):\n self._ll_tree.first()", "def first(self):\n if self.head:\n self.cursor = self.head\n return self.cursor\n return None", "def parent(self, pos):\n if pos == 0: \n return None\n return int(math.ceil(pos / self.dary) - 1)", "def _find_first(self, ast, label):\n res = self._find_all(ast, label, max_results=1)\n if len(res):\n return res[0]\n return None", "def first_value(self):\n if not self.is_empty():\n return self.data[self.head]\n return None", "def treepos(self, tree):\n if tree is None:\n raise ValueError(\"Parse tree not available\")\n stack = [tree]\n treepos = []\n\n wordnum = 0\n while True:\n # tree node:\n if isinstance(stack[-1], Tree):\n # Select the next child.\n if len(treepos) < len(stack):\n treepos.append(0)\n else:\n treepos[-1] += 
1\n # Update the stack.\n if treepos[-1] < len(stack[-1]):\n stack.append(stack[-1][treepos[-1]])\n else:\n # End of node's child list: pop up a level.\n stack.pop()\n treepos.pop()\n # word node:\n else:\n if wordnum == self.wordnum:\n return tuple(treepos[: len(treepos) - self.height - 1])\n else:\n wordnum += 1\n stack.pop()", "def first(self):\n return self.head and self.head.value or None", "def findmin(self):\n return self.heap[0] if len(self.heap) > 0 else None", "def findMin(self):\n curr = self\n while curr.hasLeftChild():\n curr = curr.leftChild\n return curr", "def min(self):\n if not self.root:\n return None\n\n node, parent = Treap._traverse(self.root, 'left')\n return node.key", "def get_first(self):\n if self.is_empty():\n raise self.NoSuchNodeException()\n\n return self.head.data", "def _make_position(self, node):\n return self.Position(self, node) if node is not None else None", "def first(self):\r\n if self.head == None: #check if first(head) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.head.data #return the data of head node\r", "def get_position(self):\n return self._find_gnx_node(self.gnx)", "def firstElement(self):\n return self.top()" ]
[ "0.72985446", "0.7136009", "0.7109405", "0.6841215", "0.6825596", "0.6803908", "0.6718761", "0.6697293", "0.66879636", "0.6653463", "0.65870184", "0.6469372", "0.6462883", "0.6442182", "0.6429032", "0.6427359", "0.64196825", "0.6395568", "0.6392002", "0.63889307", "0.63877964", "0.6387109", "0.6376522", "0.63727134", "0.6355827", "0.6337138", "0.63231117", "0.62821066", "0.62581486", "0.6248385" ]
0.7791313
0
Return the last Position in the tree (or None if empty)
def last(self): return self._subtree_last_position(self.root()) if len(self) > 0 else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_node(self):\n nodes = self.as_list()\n\n if nodes:\n # If there are nodes return the last one.\n return nodes[-1]\n # No nodes, return None\n return None", "def last_position(self):\n return self.visited_positions[-1]", "def _last_node(self):\n if self.trail[-1][1] is None or self.trail[-1][1].group():\n return self.trail[-1][0]\n else:\n return self.trail[-2][0]", "def position_last(self):\n return self._position_last", "def _subtree_last_position(self, p):\n walk = p\n while self.right(walk) is not None:\n walk = self.right(walk)\n return walk", "def lastChild(self, parent):\r\n if len(parent):\r\n return parent[-1]\r\n else:\r\n return None", "def last(self):\n if self.tail:\n self.cursor = self.tail\n return self.cursor\n return None", "def last(self):\n self._ll_tree.last()", "def _subtree_last_position(self, p):\n walk = p\n while self.right(walk) is not None:\n walk = self.right(walk) # keep walking right\n return walk", "def last(self):\r\n if self.tail == None: #check if last(tail) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.tail.data #return the data of tail node\r", "def last_pos(self):\n return self.locs[self.indices[-1], 2:4]", "def last_move(self):\n if len(self.moves) > 0:\n return self.moves[-1]\n else:\n return None", "def last(self):\n return self.last and self.last.value or None", "def last(self):\n return Null", "def last_node(self):\n return self._array[self._size-1]", "def get_latest(self):\n if len(self.points) == 0:\n return None\n return self.points[-1]", "def GetLastChild(self, item):\r\n\r\n children = item.GetChildren()\r\n return (len(children) == 0 and [None] or [children[-1]])[0]", "def locate_last_node(self, name):\n name = name.toUri()\n path = self.name_to_path(name)\n # create a cypher query to match the path\n try:\n query = self.create_path_query(path, 'MATCH')\n except UnsupportedQueryException as ex:\n print 'Error: extract_from_repo: %s' % str(ex)\n\n records = neo4j.CypherQuery(self.db_handler, query).execute()\n if not records:\n return None\n # in the name tree there should be AT MOST one match for a \n # given name prefix\n assert(len(records.data) == 1)\n assert(len(records.data[0].values) == 1)\n last_node = records.data[0].values[0]\n\n return last_node", "def last(self):\n if self.is_empty():\n raise ValueError('Queue is empty!')\n return self.last_node().element().value()", "def first(self):\n return self._subtree_first_position(self.root()) if len(self) > 0 else None", "def get_last_node_child(self):\n\n pass", "def _last_matching(self, arg):\n try:\n if arg:\n return self.history.get(arg)[-1]\n else:\n return self.history[-1]\n except IndexError:\n return None", "def max(self):\n no = self.root\n if no:\n no = self.__search_node_max_esq(no)\n if no:\n return no.valor\n return None", "def top(self):\n if self.stack == []:\n return None\n return self.stack[-1]", "def _findTopNode():\n node = nuke.thisNode()\n\n parent_node = node\n while parent_node != None:\n last_node = parent_node\n parent_node = parent_node.input(0)\n return last_node", "def lastPos(self):\n return Point(self.currentItem.mapFromScene(self._lastScenePos))", "def lastPos(self):\n return Point(self.currentItem.mapFromScene(self._lastScenePos))", "def lastPos(self):\n return Point(self.currentItem.mapFromScene(self._lastScenePos))", "def return_last_node(self):\n if self.head:\n current = self.head\n while True:\n\tprev = current\n\tcurrent = current.next\n\tif current == self.head:\n\t break\n return 
prev\n else:\n return None", "def get_last_deep_child(ast_node):\n if not hasattr(ast_node, \"body\"):\n return ast_node\n return get_last_deep_child(ast_node.body[-1])" ]
[ "0.77644545", "0.75120115", "0.7474619", "0.7323458", "0.7178632", "0.710007", "0.70557827", "0.7003163", "0.6992566", "0.6982288", "0.69820374", "0.69403404", "0.68176055", "0.67916065", "0.67826104", "0.67346746", "0.6727946", "0.66364545", "0.6623255", "0.65903014", "0.65767056", "0.65106535", "0.6460326", "0.6458973", "0.6453212", "0.6442669", "0.6442669", "0.6442669", "0.6436923", "0.64231944" ]
0.83756673
0
Return the Position just after p in the natural order. Return None if p is the last position.
def after(self, p): self._validate(p) if self.right(p): return self._subtree_first_position(self.right(p))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after(self, p):\n node = self._validate(p)\n return self._make_position(node._next)", "def after(self,p):\r\n \r\n current = self.tail #test from the tail node\r\n \r\n if p == current: #if the tail node = p\r\n return 'null' #there cannot be a node after it\r\n \r\n while current !=p: #else keep cheking the elements until it reaches p\r\n current = current.prev\r\n return current.next.data #now current = p, so return the node after it\r", "def after(self, p):\n self._validate(p)\n # if there exists p's right child, successor is left most position\n # in p's right subtree\n if self.right(p) is not None:\n walk = self.right(p)\n while self.left(walk)is not None:\n walk = left(walk)\n return walk\n # successor is the parent of the \"right turn\" position \n # when going upward\n else:\n walk = p\n above = self.parent(walk)\n while above is not None and walk==self.right(above):\n walk = above\n above = self.parent(walk)\n return above", "def p (self):\n\n return self.end - 1", "def _subtree_last_position(self, p):\n walk = p\n while self.right(walk) is not None:\n walk = self.right(walk)\n return walk", "def get_position(self, pos):\n element = self.head\n count = 1\n\n if pos == 1:\n return element\n elif pos > 1 and self.head:\n while count < pos:\n element = element.next\n count += 1\n if element is None:\n break\n return element # if (element is not None) else (f\"Position {pos} does not exist.\") # conditional expression\n # else:\n # return None\n return None", "def position(self):\n if self.p:\n if self._finished:\n return None\n return self.p.get_position()*10", "def before(self, p):\n node = self._validate(p)\n return self._make_position(node._prev)", "def get_next_position(self):", "def _subtree_last_position(self, p):\n walk = p\n while self.right(walk) is not None:\n walk = self.right(walk) # keep walking right\n return walk", "def _find_position(self, e):\n walk = self._data.first()\n while walk is not None and walk.element()._value != e:\n walk = self._data.after(walk)\n \n return walk", "def get_position(self, position):\n current = self.head\n index = 1\n \n if position < 1:\n return None\n elif position == 1:\n return current\n else:\n while index != position:\n if current.next:\n current = current.next\n else:\n return None\n index += 1\n return current", "def last_position(self):\n return self.visited_positions[-1]", "def before(self, p):\n self._validate(p) # inherited from linked bin tree\n if self.left(p):\n return self._subtree_last_position(self.left(p))\n else:\n walk = p # walk up tree\n above = self.parent(walk)\n while above is not None and walk == self.left(above):\n walk = above\n above = self.parent(walk)\n return above", "def get_position(self, position):\n current=self.head\n count=1\n if position < 1:\n return None\n if self.head:\n while ((current.next )and (count<= position)):\n if count == position:\n return current\n current = current.next\n count=count+1\n\n return current.value", "def last_pos(self):\n return self.locs[self.indices[-1], 2:4]", "def get_prev(self, pos):\n if pos <= 0:\n return None, None\n return self._get_at(pos - 1)", "def getUpperLowerFromPosition(self, p=None, label=None, sigma=1e-7):\n\n if p in self.getPositions(label=label):\n return self.i[label].index(p), self.i[label].index(p)\n elif p < self.i[label][0]:\n # print \"Warning: position %1.5E is less than lowest interface %1.5E\" % (p, self.i[label][0])\n return None, 0\n elif p > (self.i[label][-1] * (1 + sigma)):\n # print \"Warning: position %1.5E is greater than the 
greatest interface %1.5E\" % (p, self.i[label][-1])\n return len(self.i[label]) + 1, None\n j = 0\n while j + 1 <= len(self.i[label]):\n if (self.i[label][j] * (1 - sigma)) <= p and p < (\n self.i[label][j + 1] * (1 + sigma)\n ):\n return j, (j + 1)\n j += 1", "def get_word_postion(self, word: Word) -> Tuple[int, int]:\n text: str = self.to_text()\n words: List[Word] = self.get_words()\n current_position: int = 0\n\n for w in words:\n current_position = text.find(w.text, current_position)\n\n if w == word:\n return (current_position, current_position + len(w.text))\n return 0, 0", "def FindLastOpBeforePos(NotedRE, Pos):\n\n assert NotedRE\n \n Count = 0\n for i in range(Pos - 1, -1, -1):\n if NotedRE[i][0] is 'RIGHT_PARENTHESES':\n Count += 1\n elif NotedRE[i][0] is 'LEFT_PARENTHESES':\n Count -= 1\n \n if Count == 0:\n return i\n \n assert Count == 0", "def locate_predecessor(self, key):\r\n index = self.search(key)\r\n return index-1", "def particleLastHalo(particle, halo):\n for i in range(0, len(halo.fullParents)):\n if halo.fullParents[i].containsParticle(particle):\n return halo.fullParents[i]\n return -1", "def pcurrent(self):\n return self.pointlist[-1]", "def _find_position(self, element):\n walk = self._data.first()\n while walk is not None and walk.element()._value != element:\n walk = self._data.after(walk)\n return walk", "def getClosestUpperFromPosition(self, p=None, label=None):\n if p in self.i[label]:\n return self.i[label].index(p)\n else:\n \"\"\"\n i = 0\n self.i[label][i]-p\n self.di[label][i]\n len(self.i[label])\n while math.fabs(self.i[label][i]-p) > self.di[label][i] and i < len(self.i[label]):\n i += 1\n if math.fabs(p - self.i[label][i]) < (self.di[label][i]/2):\n # closer to lower bounds\n # or looped through the mesh without finding a closest match\n if i == (len(self.i[label])-1) or math.fabs(p - self.i[label][i]) < math.fabs(p - self.i[label][i+1]):\n return i\n else:\n return (i + 1)\n else:\n return i + 1\n \"\"\"\n return self.i[label].index(min(self.i[label], key=lambda x: abs(x - p)))", "def get_leftmost_atom(self, p):\r\n\r\n if p.type == self.PT.atomic:\r\n return p.v1\r\n else:\r\n return self.get_leftmost_atom(p.v1)", "def _re(self, p):\n return self.edges[:, 0, :] - p # 0 is arbitrary - the other end also works", "def current_position(self):\n\n # It is an error to call playlist_current_pos when there are\n # no entries in the playlist.\n r = self.x.playlist_current_pos()\n r.wait()\n if r.iserror():\n print r.get_error()\n return None\n else:\n return r.get_dict()['position']", "def find_head_pos(self, relation):\n\t\treturn int(re.search('(?<=-)[0-9]*(?=, )',relation).group(0))", "def before(self,p):\r\n \r\n current = self.head #test from the head node\r\n \r\n if p == current: #if the head node = p\r\n return 'null' #there cannot be a node before it\r\n \r\n while current != p: #else keep checking the elements until it reaches p\r\n current = current.next\r\n return current.prev.data #now current = p, so return the node before p\r" ]
[ "0.690741", "0.6589813", "0.65660757", "0.63753384", "0.6306047", "0.62684643", "0.6233519", "0.6223828", "0.61747414", "0.6174467", "0.61634713", "0.60753924", "0.6069791", "0.6054677", "0.60125583", "0.5986297", "0.5981063", "0.59719", "0.59353846", "0.5913079", "0.58700633", "0.5864646", "0.58591616", "0.58575326", "0.5843718", "0.580911", "0.58023655", "0.5792038", "0.57890946", "0.5781846" ]
0.67440933
1
Refresh the visualization session information.
def _refresh(request): # check the session for a vtkweb instance vis = request.session.get('vtkweb') if vis is None or vtk_launcher.status(vis.get('id', '')) is None: # open a visualization instance vis = vtk_launcher.new_instance() request.session['vtkweb'] = vis return dict(vis)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_visualization(self) -> None:\n pass", "def update_plot():\n pass", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh_screen(self):\n stdscr = self.stdscr\n stdscr.refresh()", "def plot_refresh():\n figure.canvas.draw()", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def Refresh(self):\n pass", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def new_session_loaded(self):\n session = self.parent.session\n if session is None: return None\n #logger.debug(\"LOADING NEW SESSION\")\n self.figure.new_session(session)\n self.refresh_table()\n self.summarize_current_table()\n self.refresh_plots()\n self.update_fitting_options()\n return None", "def refresh(self):\n self.__refresh()", "def redraw_viz():\n\tglobal g_last_draw\n\tif (rospy.Time.now().to_sec() > (refresh_rate + g_last_draw)):\n\t\tg_last_draw = rospy.Time.now().to_sec()\n\t\t# redraw imu box\n\t\tdoDraw()", "def refresh_session():\n\n hruntime.response.headers['Cache-Control'] = 'must-revalidate, no-cache, no-store'\n\n hruntime.user = hruntime.dbroot.users[hruntime.session.name]\n hruntime.i18n = hruntime.dbroot.localization.languages['cz']", "def refresh(self):\n\n # Set Graphics scene\n self.setScene(QtGui.QGraphicsScene())\n self._connections = set()\n self._nodes = {}\n self._selection = set()\n self._manipulation_mode = 0\n self._selection_rect = None", "def redraw(self):\n self.vispy_viewer.canvas.update()", "def refresh(self):\n self.Refresh()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def refresh_plot(self):\n self.ax.relim() # recompute the data limits\n self.ax.autoscale_view() # automatic axis scaling\n self.fig.canvas.flush_events()", "def refresh_screen(self):", "def refresh(self):\r\n # todo, use vid_info as property instead of this\r\n # reset properties and rebuild streams\r\n self.setup()", "def _update_current_graph(self, **kwargs):\n\n self.current_graph.redraw()", "def refresh(self, _loop, data):\n try:\n if(self.model.mode == 'live'):\n self.updateGraphs()\n self.model.memory = int(self.dataClient.recv())\n self.model.cpu = float(self.dataClient.recv())\n except EOFError:\n pass\n except Exception as e:\n self.logger.error(e)\n\n self.view.refresh()\n _loop.set_alarm_in(guiRefreshTimer, self.refresh)", "def refresh_HDV(self):\n self.canvas.draw()\n self.dicom_navigation.parent.dicom_right_window.top_info.canvas_HDV.draw()", "def refresh_all(self):\n\t\t\n\t\tself.symbolsList.set_datasource(self.source)\n\t\tself.symbolsList.refresh()\n\t\t\n\t\tself.plotFrame.set_datasource(self.source)\n\t\tself.plotFrame.refresh()", "def reload_processgraph_view(self):\n #widget = self.processgraphWidget\n #self.load_dict_into_widget(widget, self.processgraph.graph)\n self.processgraphEdit.setText(json.dumps(self.processgraph.graph, indent=2, sort_keys=True))\n #widget.show()" ]
[ "0.71400917", "0.6427015", "0.63905346", "0.63905346", "0.63827485", "0.63614184", "0.63082224", "0.63082224", "0.63082224", "0.6300843", "0.6300843", "0.62704825", "0.6262251", "0.6204581", "0.61797917", "0.6175653", "0.6151583", "0.6135584", "0.61329216", "0.6109207", "0.60997236", "0.6002734", "0.5996415", "0.59761494", "0.5963111", "0.59621406", "0.59452844", "0.5941454", "0.5931789", "0.5903657" ]
0.70314527
1
Set duration for test.
def set_duration(self, duration): self.__test_result[Result.__DURATION] = round(duration * 1000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_set_duration(self, duration):\n self._duration = duration", "def duration(self, duration):\n self._duration = duration", "def duration(self, duration):\n self._duration = duration", "def duration(self, duration):\n\n self._duration = duration", "def duration(self, duration):\n\n self._duration = duration", "def duration(self, duration):\n\n self._duration = duration", "def duration(self, duration):\n\n self._duration = duration", "def duration(self, duration):\n\n self._duration = duration", "def duration(self, duration):\n\n self._duration = duration", "def duration(self, duration):\n\n self._duration = duration", "def duration(self, duration):\n\n self._duration = duration", "def set_duration(self, duration_secs):\n raise UnsupportedOperation('Duration not implemented')", "def duration(self, duration):\n # type: (int) -> None\n\n if duration is not None:\n if not isinstance(duration, int):\n raise TypeError(\"Invalid type for `duration`, type has to be `int`\")\n\n self._duration = duration", "def set_duration_tier(self, value):\n self._options['duration'] = bool(value)", "def testHrtDuration(self):\n attr = self.session.create_visit_attr()\n\n self.util.stringTypeTest(self, attr, \"duration\")\n\n self.util.stringPropertyTest(self, attr, \"duration\")", "def setDuration(self, duration):\n if duration == None:\n return False\n dur = _float(duration)\n if dur == None:\n raise Error_Float(\"Cannot interpret duration {}\".format(duration))\n return False\n else:\n self.duration = dur\n self.durationMs = self.durationToMs(dur)\n return True", "def set_duration(self, duration, movable_attr=\"DTSTART\"):\n i = self.icalendar_component\n return self._set_duration(i, duration, movable_attr)", "def __init__(__self__, *,\n test_process_duration: 'outputs.DurationResponse'):\n pulumi.set(__self__, \"test_process_duration\", test_process_duration)", "def duration_in_seconds(self, value):\n self.__duration = (value * 1000000)", "def setDuration(self, *args):\n return _osgAnimation.Animation_setDuration(self, *args)", "def test_duration(self):\n for duration_, _, _ in self.test_cases:\n self.assertEqual(Rest(duration_).duration, duration_)", "def __init__(__self__, *,\n test_process_duration: Optional[pulumi.Input['DurationArgs']] = None):\n if test_process_duration is not None:\n pulumi.set(__self__, \"test_process_duration\", test_process_duration)", "def __init__(__self__, *,\n duration: Optional[pulumi.Input[str]] = None):\n if duration is not None:\n pulumi.set(__self__, \"duration\", duration)", "def pytest_timeout_set_timer(item, settings):", "def start(self, duration=None):\n if duration is None:\n duration = self.duration\n\n self.set(duration)", "def test_duration_property(self):\n recording_dt = 0.1\n recording_shape = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n expected_duration = recording_shape['no_timesteps'] * recording_dt\n test_rec = rt.Recording(\n np.zeros(\n [\n recording_shape['no_channels'],\n recording_shape['no_timesteps'],\n recording_shape['no_sweeps'],\n ]\n ),\n dt=recording_dt,\n )\n npt.assert_almost_equal(\n test_rec.duration,\n expected_duration,\n err_msg='Expected {} for `duration` property; got {} instead.'.format(\n expected_duration, test_rec.duration\n ),\n )", "def set_timeout(self, seconds):\n self._timeout = seconds", "def settimeout(self, value: int) -> None:\n ...", "def setTestTime(self, timestamp):\n self._test_time = timestamp", "def test_duration_attribute_is_working_properly(self):\n d = 
DurationMixin(duration=10)\n d.duration = 15\n self.assertEqual(15, d.duration)" ]
[ "0.76691973", "0.75579613", "0.75579613", "0.7402852", "0.7402852", "0.7402852", "0.7402852", "0.7402852", "0.7402852", "0.7402852", "0.7402852", "0.7241233", "0.6952764", "0.69131434", "0.6885508", "0.6839374", "0.6780654", "0.6772885", "0.6649575", "0.6629042", "0.6527529", "0.65017635", "0.6451426", "0.63262373", "0.6284202", "0.6264977", "0.6217179", "0.6187322", "0.61826366", "0.61805254" ]
0.83807087
0
Set status and message for the specified step.
def set_step_status(self, step_summary: str, status: str = Status.PASSED, message: str = None): temp = {Result.__STEP: step_summary, Result.__STATUS: status, Result.__MESSAGE: message} self.__run.append(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def report_step_progress(self, step):\n dot_status = self.dot_status[step.status.name]\n if step.status == Status.failed:\n if (step.exception and\n not isinstance(step.exception, AssertionError)):\n # -- ISA-ERROR: Some Exception\n dot_status = self.dot_status[\"error\"]\n step.feature = self.current_feature\n step.scenario = self.current_scenario\n self.failures.append(step)\n self.stream.write(dot_status)\n self.stream.flush()", "def report_step_progress(self, step):\n pass", "def add_step(self, step):\n if not step:\n return\n temp = {Result.__STEP: step.get_name(),\n Result.__STATUS: step.get_status(),\n Result.__MESSAGE: step.get_message()}\n self.__run.append(temp)", "def update_job_step_status(self, step_id, job_id, status, detail, msg):\n try:\n self._session.query(JobStepEntity).\\\n filter(JobStepEntity.step_id == step_id).\\\n filter(JobStepEntity.job_id == job_id).\\\n update(\n {\n 'status': status,\n 'detail': detail,\n 'msg': case(\n [(JobStepEntity.msg == '', msg)],\n else_=JobStepEntity.msg+'|'+msg\n )\n },\n synchronize_session=False\n )\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return True", "def step_impl(context, message):\n assert message in context.driver.title", "def set_status(self, msg):\n if self.msg[:5] != \"ERROR\":\n self.msg = msg\n else:\n if msg[:5] == \"ERROR\":\n self.msg = \"\\n\" + msg", "def progress(self, msg):\n logging.info(\"UI-Test: \" + msg)\n with step(\"UI test progress: \" + msg):\n pass\n if len(self.state) > 0:\n self.state += \"\\n\"\n self.state += \"UI: \" + msg", "def SetStatusMessage(self, msg):\n if self._status_msg_fn:\n self._status_msg_fn(msg)\n else:\n tf.logging.info('Status: %s', msg)", "def set_status(\n self,\n key: str,\n status: TaskStatus,\n error: Optional[ErrorInfo] = None,\n skipped_by: Optional[str] = None,\n ) -> None:\n raise NotImplementedError", "def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return", "def set_status(self, status: Status) -> None:\n if status.status_code == StatusCode.ERROR:\n self.elastic_span.outcome = constants.OUTCOME.FAILURE\n elif status.status_code == StatusCode.OK:\n self.elastic_span.outcome = constants.OUTCOME.SUCCESS\n else:\n self.elastic_span.outcome = constants.OUTCOME.UNKNOWN", "def _set_status(self, action, status):\n raise NotImplementedError(\"Base class: cannot be called directly\")", "def put_status(status='successed'):\n if not proxy_config.OUTPUT_DEBUG_INFO:\n return\n status_code=''\n status_color=()\n if status.strip()=='successed':\n status_code='*'\n status_color=(Color.Green,None,[Color.Bold])\n elif status.strip()=='warning':\n status_code='-'\n status_color=(Color.Yellow,None,[Color.Bold])\n else:\n status_code='!'\n status_color=(Color.Red,None,[Color.Bold])\n print(colored(f'[{status_code}]',*status_color),end=' ')", "def SetStatus(self, status):\r\n self.status = status", "def log_step(step: int, message: str, stdout: bool = True) -> None:\n log(f\"Step {step:6d}: {message}\", stdout=stdout)", "def step_impl(context, message):\n expect(context.driver.title).to_contain(message)", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def step(self, **kwargs):\n pass", 
"def setStatus(self, status, details=None):\n self.onStatusSent(None, status)", "def step(self, step=None):\n pass", "def status(self, status):\n self._set_property_(self.STATUS, str(status))", "async def set_status(event, gh, *args, **kwargs):\n issue_number_found = ISSUE_RE.search(event.data[\"pull_request\"][\"title\"])\n if not issue_number_found:\n issue_url = event.data[\"pull_request\"][\"issue_url\"]\n data = await gh.getitem(issue_url)\n for label in data[\"labels\"]:\n if label[\"name\"] == TRIVIAL_LABEL:\n status = TRIVIAL_STATUS\n break\n else:\n status = FAILURE_STATUS\n else:\n status = create_success_status(issue_number_found)\n await _post_status(event, gh, status)", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status" ]
[ "0.6730893", "0.6360367", "0.6348932", "0.6267725", "0.62492114", "0.6235833", "0.6199079", "0.6175628", "0.6044504", "0.60384035", "0.6010956", "0.60092", "0.5985237", "0.59349287", "0.5883274", "0.5878642", "0.58284914", "0.58284914", "0.58284914", "0.58048546", "0.58047265", "0.5787675", "0.57573044", "0.57357854", "0.572944", "0.571097", "0.571097", "0.571097", "0.571097", "0.571097" ]
0.77131367
0
Add a step to report.
def add_step(self, step): if not step: return temp = {Result.__STEP: step.get_name(), Result.__STATUS: step.get_status(), Result.__MESSAGE: step.get_message()} self.__run.append(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addStep(self, step):\n self.stepper.addStep(step)\n return self", "def add_step(self, step, run_by_default=True):\n self.steps[step.name] = step\n if run_by_default:\n self.steps_to_run.append(step.name)", "def addStep( self, stepNum ):\n assert isinstance( stepNum, (int, tuple) )\n\n assert isinstance( self._level, int )\n assert isinstance( self._steps, list )\n assert isinstance( self._outter, Env ) or ( self._outter is None )\n\n self._steps.append( stepNum )", "def add_step(self,\n step_class: Type[Step],\n step_label: str,\n step_settings: Optional[Dict[str, Any]] = None,\n step_tmp_vals=None,\n index=None):\n updated_step_settings = self.extract_step_settings(\n step_class, step_label, step_settings)\n step_settings['settings'] = updated_step_settings\n self.routine_template.add_step(step_class,\n step_label,\n step_settings,\n step_tmp_vals=step_tmp_vals,\n index=index)", "def report_step(self, step_report: StepReport):\n\n queue_item = QueueItem(\n report_as_json=step_report.to_json(),\n url=urljoin(self._remote_address, Endpoint.ReportStep.value),\n token=self._token,\n )\n\n self._queue.put(queue_item, block=False)", "def record(self, step):", "def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)", "def add_step_hook(h):\n add_hook(step, h)", "def report_step_progress(self, step):\n pass", "def add_view_step(self, view_step):\n self._data_dict[self.KEY_VIEW_STEPS].append(view_step)", "def add_workflow_step(self, wf_step):\n self._data_dict[self.KEY_WF_STEPS].append(wf_step)", "def addStepRecord(self, name):\n assert name in RunRecord._recordNames\n record = StepRecord()\n if name in RunRecord._simpleNames:\n assert name not in self._records\n self._records[name] = record\n else:\n if name not in self._records:\n self._records[name] = StepRecordList()\n self._records[name].entries.append(record)\n return record", "def add_step(self):\n function_name, ok = QInputDialog.getText(self, 'Add step', 'Enter the function name (in custom.py or functions.py):')\n if ok:\n nsteps = len(self.mgr.obj.steps)\n try:\n self.mgr.obj.insert_step(function_name)\n except ValueError as err:\n print(\"Error adding step: {:s}\".format(str(err)))\n return\n\n # Flag the Generator as changed\n self.mgr.changed = True\n\n # Refresh lists/tables\n self.load_steps()\n self.stepsListWidget.setCurrentRow(nsteps)", "def add_step (self, methodname, args_obj):\n self.append( (methodname, args_obj,) )", "def start_step(self, _, step, **kwargs):\n if self._cfg.log_layout is not LogLayout.SCENARIO:\n step_content = self._build_step_content(step)\n self._step_id = self._rp.start_test_item(\n name=f\"[{step.keyword}]: {step.name}\",\n start_time=timestamp(),\n item_type=\"STEP\",\n parent_item_id=self._scenario_id,\n code_ref=self._code_ref(step),\n description=step_content,\n has_stats=False\n if self._cfg.log_layout is LogLayout.NESTED\n else True,\n **kwargs,\n )\n self._log_item_id = self._step_id\n if self._cfg.log_layout is LogLayout.NESTED and step_content:\n self.post_log(step_content)", "def step(self, step=None):\n pass", "def add_step(self,\n step_class,\n step_label,\n step_settings,\n step_tmp_vals=None,\n index=None):\n if step_tmp_vals is None:\n step_tmp_vals = []\n\n if index is None:\n super().append(\n [step_class, step_label, step_settings, step_tmp_vals])\n else:\n super().insert(\n index, [step_class, 
step_label, step_settings, step_tmp_vals])", "def add_step_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='STEP',\n entry_message=entry_message,\n data='')", "def report_step_progress(self, step):\n dot_status = self.dot_status[step.status.name]\n if step.status == Status.failed:\n if (step.exception and\n not isinstance(step.exception, AssertionError)):\n # -- ISA-ERROR: Some Exception\n dot_status = self.dot_status[\"error\"]\n step.feature = self.current_feature\n step.scenario = self.current_scenario\n self.failures.append(step)\n self.stream.write(dot_status)\n self.stream.flush()", "def write_line(self, line):\n # TODO(iannucci): have step_runner log the step metadata as a protobuf\n # and/or put it in the Step proto message.\n return self.logging.write_line(line)", "def addStep( self, prop, premiseCitationList, inferenceRule, mapping, conclusionIndex ):\n # Add the step to the Proof\n assert isinstance( prop, WFF )\n assert isinstance( premiseCitationList, list )\n assert isinstance( inferenceRule, InferenceRule )\n assert isinstance( mapping, dict )\n assert isinstance( conclusionIndex, int )\n\n assert isinstance( self._env, Env )\n assert isinstance( self._steps, list )\n\n self._steps.append( Step( self._env.level( ), prop, premiseCitationList, inferenceRule, mapping, conclusionIndex ) )\n\n self._env.addStep( len(self._steps) )", "def add_component(to_add_report, html_report_path, to_add_log=None,\n html_log_path=None):\n html_file_obj = open(html_report_path, 'a')\n html_file_obj.write(to_add_report)\n html_file_obj.close()\n\n if html_log_path is not None:\n html_file_obj = open(html_log_path, 'a')\n html_file_obj.write('<hr/>'+to_add_log)\n html_file_obj.close()", "def create_step(self, step):\n raise NotImplementedError", "def add_step_listener(self, listener):\n self.step_listeners.append(listener)", "def add_step(self, data):\n step_id = str(uuid.uuid4()).replace('-', '')\n try:\n self._session.add(StepEntity(\n id=step_id,\n workflow_id=data['workflow_id'],\n app_id=data['app_id'],\n name=data['name'],\n number=data['number'],\n letter=data['letter'],\n map_uri=data['map_uri'],\n map_inclusive=data['map_inclusive'],\n map_glob=data['map_glob'],\n map_regex=data['map_regex'],\n template=data['template'],\n exec_context=data['exec_context'],\n exec_method=data['exec_method'],\n exec_parameters=data['exec_parameters']\n ))\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return step_id", "def add_job_step(self, data):\n try:\n self._session.add(JobStepEntity(\n step_id=data['step_id'],\n job_id=data['job_id'],\n detail='{}',\n ))\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return True", "def add_step(self):\n assert self.y_real is not None and self.y_predicted is not None\n\n # Calculates some metrics\n rmse = Metrics.rmse_loss(self.y_real, self.y_predicted)\n mse = Metrics.mse_loss(self.y_real, self.y_predicted)\n cm = Metrics.confusion_matrix(self.y_real, self.y_predicted)\n accuracy = Metrics.accuracy(cm)\n\n # Store them\n self.summary['rmse'].append(rmse)\n self.summary['accuracy'].append(accuracy)\n self.summary['mse'].append(mse)\n self.summary['cm'].append(cm)", "def step(self, **kwargs):\n pass", "def insert_after(self, request, current_step, step):\n steps = self.get_steps(request)\n\n if step not in steps:\n index = steps.index(current_step) + 1\n steps.insert(index, step)", "def log_step(step: int, message: str, stdout: bool = 
True) -> None:\n log(f\"Step {step:6d}: {message}\", stdout=stdout)" ]
[ "0.7300454", "0.6940515", "0.68519664", "0.6736752", "0.64998645", "0.6465836", "0.6410942", "0.63787425", "0.6328564", "0.6318966", "0.626205", "0.6222592", "0.6116363", "0.60779536", "0.60434836", "0.5996921", "0.5988934", "0.5988201", "0.59717214", "0.5943704", "0.5937647", "0.5857048", "0.5836125", "0.58308035", "0.5810028", "0.5794786", "0.5790189", "0.5788797", "0.56050134", "0.5593239" ]
0.74939346
0
Set status of test to FAILED.
def set_test_failed(self): self.set_result(Status.FAILED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetUnexpectedFailure(test_result):\n test_result['status'] = 'FAIL'\n test_result['expected'] = False\n logging.error('Processing failed for test %s', test_result['testPath'])", "def addFailure(self, test, err):\n test.status = \"failed\"\n self._addError(test, err)", "def set_test_passed(self):\n self.set_result(Status.PASSED)", "def mark_failure(self):\n LOGGER.debug('Marking current_state as: %s', self.States.FAILED)\n self.current_state = self.States.FAILED", "def mark_failed(self, test):\n if not test:\n LOGGER.warn('Empty or None test name passed to standard_json_util')\n return\n\n if test in self.tests:\n self.tests[test]['actual'] = self.tests[test]['actual'] + \" FAIL\"\n self.tests[test]['is_unexpected'] = True\n else:\n self.tests[test] = {\n 'expected': 'PASS',\n 'actual': 'FAIL',\n 'is_unexpected': True\n }", "def failed(self) -> None:\n self.failure_count += 1", "def _failed(self, msg):\n self.log(msg)\n self.result.passed = False\n self.result.add_error(msg)\n self.log(u\"Failed\")", "def failed(self, failed):\n\n self._failed = failed", "def failed(self, failed):\n\n self._failed = failed", "def mark_failed(self):\n self.status = self.FAILED\n self.traceback = self._format_traceback()\n self.save(update_fields={'status', 'traceback', 'updated_at'})", "def test_state_after_failure(self):\n pass", "def test_status(self, test_status: str):\n allowed_values = ['pass', 'fail', 'indeterminate'] # noqa: E501\n if test_status not in allowed_values:\n raise ValueError('Invalid value for `test_status` ({0}), must be one of {1}'.format(\n test_status, allowed_values))\n\n self._test_status = test_status", "def reset_failure_count(self): # suppress(unused-function)\n self._failures = 0", "def set_status(self, status: Status) -> None:\n if status.status_code == StatusCode.ERROR:\n self.elastic_span.outcome = constants.OUTCOME.FAILURE\n elif status.status_code == StatusCode.OK:\n self.elastic_span.outcome = constants.OUTCOME.SUCCESS\n else:\n self.elastic_span.outcome = constants.OUTCOME.UNKNOWN", "def failed( self, mesg ):\n self.tests_failed += 1\n print \"fail: \" + mesg.rstrip()", "def showTestFailure(self, test):\n #self._setTestButtonColor(test.id(), self.FAILURE_COLOR)\n self.test_buttons[test.id()].setState('failure')\n self.update_idletasks()\n return", "def set_failed(self, exception):\n self.logger.info(\"status: FAILED\")\n self._callback('on_failed', exception)\n return self.update_response(self.encoder.encode_failed(exception))", "def test_04_fail(self):\n if y == 2:\n self.fail('This is a custom fail message')", "def test_failed_job(self):\n\n failed_job = json.loads(TREEHERDER_JOB % (\"testfailed\", \"completed\"))\n self.assertEquals(self.query_api.get_job_status(failed_job), FAILURE)", "def test_api_object_failed_property(self, api_object):\n api_object.status = 'FAILED'\n assert api_object.failed\n assert not api_object.creating", "def useFailures(self):\n self.setupTests(tests = self.failures)", "def print_tcase_failed(self,testcaseName,reasonFailed):\n\n # go throuht the test case objects\n\tfor t in self.testcases:\n\t\t\n\t\ttName = t.name\n\t\tif tName == testcaseName:\n\t\t\t#print tName\n\t\t\tt.status = \"Failed\"\n\t\t\tt.reasonPassed = reasonFailed\n self.print_summary()\n raise TestCaseFailed (\"Testcase '%s' Failed, reason '%s\"%(testcaseName,reasonFailed))\n sys.exit(1)\n return 1\n\n\traise ViriValuePassedError(\"Testcase '%s' doesnt seem to be run but print failed called\"%testcaseName)", "def indicate_failure(self):\n pass", "def 
addFailure(self, test, err):\n self.failure_count += 1\n self.total_count += 1\n unittest.TestResult.addFailure(self, test, err)\n _, _exc_str = self.failures[-1]\n output = self.complete_output()\n self.result.append((self.__class__.FAIL, test, output, _exc_str))\n if self.verbosity > 1:\n sys.stderr.write('F ')\n sys.stderr.write(str(test))\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('F')", "def tearDown(self) -> None:\n\n logging.info(f\"{'=' * 20}Test completed!{'=' * 20}\")\n logging.info(\"Failed to execute the following parameter combinations: \")\n if self.error_params:\n for each in self.error_params:\n logging.info(each)", "def test_case_01(self):\n if True:\n self.fail()", "def failed(self, message=None):\n doc = {self.STATE: self.STATE_FAILED}\n\n if message:\n doc.update({self.ERROR_MESSAGE: message})\n\n self.update(doc)", "def fail(self, cause = None, annotations = {}):\n self.set_outcome(Result.FAIL, cause, annotations)", "def failure(self, target):\n print \"FAILED:\"\n self.show_target(target)\n self.failed += 1", "def testComplete(self, fail):\n if not fail:\n print \"Test Complete\"\n return self.__g.SUCCESS\n else:\n print \"Test Failed\"\n return self.__g.FAIL" ]
[ "0.7404037", "0.710185", "0.70464814", "0.7028599", "0.6924147", "0.6825908", "0.6819086", "0.6817489", "0.6817489", "0.6813257", "0.6722179", "0.66460186", "0.6590278", "0.6567286", "0.6478199", "0.6436117", "0.64310205", "0.6407246", "0.6405758", "0.6396214", "0.6393541", "0.6369045", "0.6314596", "0.62753654", "0.62629557", "0.6248834", "0.6248052", "0.62334275", "0.6222567", "0.62185186" ]
0.87635076
0
Set status of test to PASSED.
def set_test_passed(self): self.set_result(Status.PASSED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_test_failed(self):\n self.set_result(Status.FAILED)", "def addSuccess(self, test):\n test.status = \"success\"", "def excecute(self):\r\n self.initialize()\r\n self.addteststeps()\r\n for teststep in self.test_steps_list:\r\n if teststep.run() == TestStatus.PASS:\r\n logging.info(\"test {} passed the test\".format(teststep.stepname))\r\n self.result = TestStatus.PASS\r\n else:\r\n logging.warn(\"test {} failed the test\".format(teststep.stepname))\r\n self.result = TestStatus.FAIL\r\n self.cleanup()\r\n return self.result", "def set_status_running(self) -> None:\n if self._is_aborted():\n return\n assert self._status == self.Status.WAITING_FOR_TEST_START\n self._status = self.Status.RUNNING\n self.notify_update()", "def test_status(self, test_status: str):\n allowed_values = ['pass', 'fail', 'indeterminate'] # noqa: E501\n if test_status not in allowed_values:\n raise ValueError('Invalid value for `test_status` ({0}), must be one of {1}'.format(\n test_status, allowed_values))\n\n self._test_status = test_status", "def test_set_scan_status(self):\n pass", "def print_tcase_success(self,testcaseName,reasonPassed):\n\n # go throuht the test case objects\n\tfor t in self.testcases:\n\t\t\n\t\ttName = t.name\n\t\tif tName == testcaseName:\n\t\t\t#print tName\n\t\t\tt.status = \"Passed\"\n\t\t\tt.reasonPassed = reasonPassed\n return 1\n\tprint_green(\"=\" * 80)\n\ttrace_success(\"TESTCASE: PASSED %s,reason '%s'\"%(testcaseName,reasonPassed))\n\tprint_green(\"=\" * 80)\n \n\traise ViriValuePassedError(\"Testcase '%s' doesnt seem to be run but print success called\"%testcaseName)", "def testComplete(self, fail):\n if not fail:\n print \"Test Complete\"\n return self.__g.SUCCESS\n else:\n print \"Test Failed\"\n return self.__g.FAIL", "def success(self, target):\n self.passed += 1", "def test_passed():\n pass", "def finished_tests(self):\n self.testing = 0", "def set_status(self, learning=False, testing=False):\n self.learning = learning\n self.testing = testing", "def SetStatus(self, status):\r\n self.status = status", "def addSuccess(self, test):\n self.success_count += 1\n self.total_count += 1\n unittest.TestResult.addSuccess(self, test)\n\n output = self.complete_output()\n self.result.append((self.__class__.PASS, test, output, ''))\n\n if self.verbosity > 1:\n sys.stderr.write('ok ')\n sys.stderr.write(str(test))\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('.')", "def passed(self):\n if self.result == RESULT_PASS:\n return True\n\n return False", "def assertSuccessStatus(self, options, arguments):\n self.assertFailStatus(0, options, arguments)", "def setTestResult(self, rlt):\n self.__testResult = rlt\n\n total_count = TestScriptSymbolTable.get_value_from_sym_tab(\"total_count\", TestScriptSymbolTable.test_result_tab) + 1\n TestScriptSymbolTable.insert_sym_tab(\"total_count\", total_count, TestScriptSymbolTable.test_result_tab)\n #if rlt == 'PASS':\n if 'PASS' in rlt:\n pass_count = TestScriptSymbolTable.get_value_from_sym_tab(\"pass_count\", TestScriptSymbolTable.test_result_tab) + 1\n TestScriptSymbolTable.insert_sym_tab(\"pass_count\", pass_count, TestScriptSymbolTable.test_result_tab)\n else:\n fail_count = TestScriptSymbolTable.get_value_from_sym_tab(\"fail_count\", TestScriptSymbolTable.test_result_tab) + 1\n TestScriptSymbolTable.insert_sym_tab(\"fail_count\", fail_count, TestScriptSymbolTable.test_result_tab)\n \n #self.generateFinalResult()", "def test_case_01(self):\n if True:\n self.fail()", "def addSuccess(self, test):\n self.passing.append(proto_test(test))", 
"def test_completed():\n assert complete == 1\n assert errorflag == 0", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def SetUnexpectedFailure(test_result):\n test_result['status'] = 'FAIL'\n test_result['expected'] = False\n logging.error('Processing failed for test %s', test_result['testPath'])", "def set_step_status(self, step_summary: str, status: str = Status.PASSED,\n message: str = None):\n temp = {Result.__STEP: step_summary, Result.__STATUS: status,\n Result.__MESSAGE: message}\n self.__run.append(temp)", "def test_active_on(self):\n\n self.feature_test.set_percentage(100)\n self.assertTrue(self.feature_test.is_active)", "def mark_passed(self, test, flaky=False):\n if not test:\n LOGGER.warn('Empty or None test name passed to standard_json_util')\n return\n\n if test in self.tests:\n self.tests[test]['actual'] = self.tests[test]['actual'] + \" PASS\"\n else:\n self.tests[test] = {'expected': 'PASS', 'actual': 'PASS'}\n\n if flaky or 'FAIL' in self.tests[test]['actual']:\n self.tests[test]['is_flaky'] = True\n\n self.tests[test].pop('is_unexpected', None)", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def addSuccess(self, test, test_time=None):\n test = proto_test(test)\n if test_time:\n test.test_time = str(test_time)\n self.passing.append(test)\n self._reportOutcome(test, \".\", self.colors.passing)", "def set_status(self, status: Status) -> None:\n if status.status_code == StatusCode.ERROR:\n self.elastic_span.outcome = constants.OUTCOME.FAILURE\n elif status.status_code == StatusCode.OK:\n self.elastic_span.outcome = constants.OUTCOME.SUCCESS\n else:\n self.elastic_span.outcome = constants.OUTCOME.UNKNOWN" ]
[ "0.74274033", "0.6862972", "0.67527765", "0.655421", "0.6513621", "0.65119374", "0.6406571", "0.63926363", "0.63095206", "0.62739694", "0.62568843", "0.62145996", "0.61953396", "0.6188526", "0.6136933", "0.613267", "0.6130687", "0.61200535", "0.611168", "0.6091256", "0.60275835", "0.60275835", "0.60275835", "0.59673697", "0.5961396", "0.5952017", "0.59489536", "0.5942751", "0.59298307", "0.59007996" ]
0.8919933
0
Get the status of test.
def get_test_status(self) -> str: return self.__test_result[Result.__RESULT]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_status(self) -> str:\n return self._test_status", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def GetStatus(self):\r\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def getstatus(self):\n return self.__status", "def test_get_status(self):\n response = self.client.open(\n '/v1/status',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_status(self):\n resp = self.build_api.getStatus().json()\n assert 'status' in resp\n assert 'message' in resp", "def get_status() -> None:\n assert scraper.get_status() == True", "def get_status(self):\n return self._status", "def _get_status(self):\n return self.__status", "def status(self):\n return self._get(path='status')", "def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()", "def getStatus(self):\n return self.__status", "def getStatus(self):\n return self._status", "def getStatus():", "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def check_status(self):\n return self.status", "def check_status(self):\n return self.status", "def status(self):\n\t\treturn self._status", "def status(self):\n return self.get(self._names[\"status\"])", "async def get_status():", "def status(self):\n return self._data['status']", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")" ]
[ "0.86555594", "0.82592815", "0.82592815", "0.76769733", "0.75723225", "0.75723225", "0.75723225", "0.7554327", "0.7521762", "0.7490179", "0.74830604", "0.7448991", "0.73775405", "0.7375381", "0.7346149", "0.730579", "0.7288645", "0.7274315", "0.72343546", "0.72274816", "0.72274816", "0.7213538", "0.7211019", "0.72006446", "0.71953285", "0.7169399", "0.7169399", "0.7169399", "0.7169399", "0.7169399" ]
0.85337096
1
The MEAM alloy parameters for a pair of symbol models.
def alloy(self, symbols): try: return self.alloys[((self.alloys.Sym1==symbols[0]) & (self.alloys.Sym2==symbols[1]))].iloc[0] except: raise ValueError(f'MEAM parameters for alloy symbols {symbols} not found')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_parameters(self):\n params = {}\n if self.modelname == 'SI':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after splot\n # Ts: Time from split to present, in 2*Na generation units\n names = ['N1', 'N2', 'Ts']\n values = [1, 1, 1]\n upper_bounds = [20, 20, 10]\n lower_bounds = [0.01, 0.01, 0]\n elif self.modelname == 'IM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Ts']\n values = [1, 1, 1, 1, 1]\n upper_bounds = [20, 20, 20, 20, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0]\n elif self.modelname == 'AM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Tam', 'Ts']\n values = [1, 1, 1, 1, 0.1, 1]\n upper_bounds = [20, 20, 20, 20, 2, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'SC':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n names = ['N1', 'N2', 'm21', 'm12', 'Ts', 'Tsc']\n values = [1, 1, 1, 1, 1, 0.1]\n upper_bounds = [20, 20, 20, 20, 10, 2]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'IM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'AM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Tam', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 0.1, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 2, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'SC2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'Tsc', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.1, 0.5]\n upper_bounds = [20, 20, 
30, 30, 5, 5, 10, 2, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n params['Names'] = names\n params['Values'] = values\n params['Upper'] = upper_bounds\n params['Lower'] = lower_bounds\n return params", "def cartesian_parameters(self):\n return self.mu_list, self.k_list", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def get_model_params(self):\n w1 = self.w1\n b1 = self.b1\n w2 = self.w2\n b2 = self.b2\n w3 = self.w3\n b3 = self.b3\n w4 = self.w4\n b4 = self.b4\n w5 = self.w5\n b5 = self.b5\n w6 = self.w6\n b6 = self.b6\n\n return w1, b1, w2, b2, w3, b3, w4, b4, w5, b5, w6, b6", "def initParms(self):\n self.parmVal = []\n self.parmName = []\n (nvect, npt) = self.data.shape\n if self.model == 0:\n self.parmVal.append(2.0)\n self.parmName.append('Order')\n if self.model == 1:\n self.parmVal.append(1.0)\n self.parmName.append('A')\n self.parmVal.append(1.0)\n self.parmName.append('B')\n if self.model == 2:\n self.parmVal.append(self.data[1][0])\n self.parmName.append('A')\n self.parmVal.append(self.data[1][npt-1])\n self.parmName.append('B')\n if self.model == 3:\n self.parmVal.append(self.data[1][0])\n self.parmName.append('Ao')\n self.parmVal.append(100.0)\n self.parmName.append('Ea')\n if self.model == 4:\n self.parmVal.append(0.001)\n self.parmName.append('A')\n self.parmVal.append(1.0)\n self.parmName.append('B')\n if self.model == 5:\n self.parmVal.append(0.001)\n self.parmName.append('A')\n self.parmVal.append(0.0)\n self.parmName.append('B')\n self.parmVal.append(1.0)\n self.parmName.append('C')\n if self.model == 6:\n self.parmVal.append(self.data[0][0])\n self.parmName.append('xo')\n self.parmVal.append(self.data[1][0])\n self.parmName.append('yo')\n yspan = getSpan(self.data[1])\n if self.data[1][0] > 0.0:\n v = self.data[1][0] + yspan/2.0\n else:\n v = self.data[1][npt-1] + yspan/2.0\n self.parmVal.append(v)\n self.parmName.append('H')\n if self.data[1][0] > self.data[1][npt-1]:\n self.parmVal.append(-1.0)\n else:\n self.parmVal.append(1.0)\n self.parmName.append('S')", "def _get_model_params(self) -> T.List[np.ndarray]:\n layers = {\n layer.name: numpy_helper.to_array(layer)\n for layer in self.onnx_model.graph.initializer\n }\n\n param_names = [\n \"imageinput_Mean\",\n \"conv_1_W\",\n \"conv_1_B\",\n \"batchnorm_1_mean\",\n \"batchnorm_1_var\",\n \"batchnorm_1_scale\",\n \"batchnorm_1_B\",\n \"conv_2_W\",\n \"conv_2_B\",\n \"batchnorm_2_mean\",\n \"batchnorm_2_var\",\n \"batchnorm_2_scale\",\n \"batchnorm_2_B\",\n \"conv_3_W\",\n \"conv_3_B\",\n \"batchnorm_3_mean\",\n \"batchnorm_3_var\",\n \"batchnorm_3_scale\",\n \"batchnorm_3_B\",\n \"fc_1_W\",\n \"fc_1_B\",\n \"fc_2_W\",\n \"fc_2_B\",\n \"fc_3_W\",\n \"fc_3_B\",\n ]\n\n params = [layers[param] for param in param_names]\n return params", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def _get_model(self):\n\n parameters = {keys._topology:self.topology,\n keys._size:self.size,\n keys._name:self.name,\n #keys._output_activation:self._outActiv_fun_key,\n #keys._hidden_activation:self._hiddenActiv_fun_key,\n keys._learning_rate:self.learningRate,\n keys._momentum:self.momentum}\n\n return parameters", "def get_model_parameters(self, *model_parameters):\r\n\r\n parameters = {k: v for k, v in zip(self.model_parameter_names, model_parameters)}\r\n\r\n return parameters", "def get_vm_parameters(self):\n return (self.__mu, self.__kappa)", "def 
def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf", "def addPppParams(model):\n \n ### GAPDP Parameters ####\n model.addParameter('GAPDP','KmSub2',0.385) # nadp\n model.addParameter('GAPDP','KmProd2',0.202) # nadph\n model.addParameter('GAPDP','kcatF',2.8)\n model.addParameter('GAPDP','kcatR',0)\n\n ### FMETTRS Parameters ###\n model.addParameter('FMETTRS','kcatF',0.45)\n\n ### MTHFC Parameters ###\n model.addParameter('MTHFC','kcatF',185)\n\n #### GHMT2 Paramters ####\n model.addParameter('GHMT2','kcatF',0.0)\n model.addParameter('GHMT2','kcatR',0.0)\n \n #### TKT1 Parameters ####\n model.addParameter('TKT1',rxnFormKey='kcatF',value=20.58)\n model.addParameter('TKT1',rxnFormKey='kcatR',value=0.8)\n \n model.addParameter('TKT1',rxnFormKey='KmSub1',value=0.743) #g3p\n model.addParameter('TKT1',rxnFormKey='KmSub2',value=3.7298) #s7p\n model.addParameter('TKT1',rxnFormKey='KmProd1',value=0.4717) #r5p\n model.addParameter('TKT1',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TKT2 Parameters ####\n model.addParameter('TKT2',rxnFormKey='kcatF',value=26.87)\n model.addParameter('TKT2',rxnFormKey='kcatR',value=1.4)\n \n model.addParameter('TKT2',rxnFormKey='KmSub1',value=0.25) #f6p\n model.addParameter('TKT2',rxnFormKey='KmSub2',value=0.743) #g3p\n model.addParameter('TKT2',rxnFormKey='KmProd1',value=0.0227) #e4p\n model.addParameter('TKT2',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TALA Parameters ####\n model.addParameter('TALA',rxnFormKey='kcatF',value=22.3)\n model.addParameter('TALA',rxnFormKey='kcatR',value=0.54)\n \n model.addParameter('TALA',rxnFormKey='KmSub1',value=0.0401) #e4p\n model.addParameter('TALA',rxnFormKey='KmSub2',value=0.6688) #f6p\n model.addParameter('TALA',rxnFormKey='KmProd1',value=1.9) #g3p\n model.addParameter('TALA',rxnFormKey='KmProd2',value=0.285) #s7p\n\n \n #### Speed up DGSN Pathway ####\n model.addParameter('DGSNK',rxnFormKey='kcatF',value=2.25)\n\n #### Speed up DADN pathway ####\n model.addParameter('PUNP2',rxnFormKey='kcatF',value=13.3)\n\n #### Speed up FBA rxn ####\n #model.addParameter('FBA',rxnFormKey='kcatF',value=64.5)\n\n model.addParameter('RNDR2',rxnFormKey='KmSub1',value=0.24)\n\n \n# #### RPI Parameters ####\n model.addParameter('RPI',rxnFormKey='kcatF',value=10.0)\n model.addParameter('RPI',rxnFormKey='kcatR',value=1.0)\n \n #model.addParameter('RPI',rxnFormKey='KmSub1',value=1.0)\n #model.addParameter('RPI',rxnFormKey='KmProd1',value=1.0)\n \n model.addParameter('FBA',rxnFormKey='KmSub1',value=0.12)\n model.addParameter('FBA',rxnFormKey='KmProd2',value=0.05)\n \n \n model.addParameter('GAPD',rxnFormKey='kcatF',value=442.0) \n model.addParameter('GAPD',rxnFormKey='kcatR',value=73.6) \n \n\n model.addParameter('FBA',rxnFormKey='kcatR',value=12.6)\n \n\n model.addParameter('TPI',rxnFormKey='kcatR',value=67)\n \n model.addParameter('TPI',rxnFormKey='KmSub1',value=0.077)\n model.addParameter('TPI',rxnFormKey='KmProd1',value=0.084) \n \n\n model.addParameter('FBA',rxnFormKey='kcatF',value=21.0)\n \n \n model.addParameter('PGK',rxnFormKey='kcatR',value=3.4)\n \n model.addParameter('PGM',rxnFormKey='KmSub1',value=3.6)\n model.addParameter('PGM',rxnFormKey='KmProd1',value=0.2)\n \n \n model.addParameter('PGK',rxnFormKey='KmSub1',value=0.01)\n model.addParameter('PGK',rxnFormKey='KmProd1',value=0.1)\n \n \n model.addParameter('GAPD',rxnFormKey='KmProd1',value=0.47)\n model.addParameter('GAPD',rxnFormKey='KmProd2',value=0.061)\n \n \n model.addParameter('DRPA',rxnFormKey='kcatR',value=34.0)\n \n 
model.addParameter('DRPA',rxnFormKey='KmProd1',value=0.267)\n model.addParameter('DRPA',rxnFormKey='KmProd2',value=0.2)\n\n \n model.addParameter('PPM2',rxnFormKey='kcatF',value=173)\n \n model.addParameter('PPM2',rxnFormKey='KmSub1',value=0.013)\n model.addParameter('PPM2',rxnFormKey='KmProd1',value=1.2)\n\n\n\n# print('Updated PPP Parameters')\n\n return", "def setup_pair_register():\n register_param = {}\n si = SI.RegisterImagePair()\n register_param['si'] = si\n register_param['model0_name']= 'affine_map'\n register_param['model1_name']= 'svf_vector_momentum_map'\n\n return register_param", "def _init_params(self):\n self.W_ems = []\n self.b_ems = []\n if self.rank_n_approx:\n W_em1 = self.init_fn[0](self.n_in,\n self.rank_n_approx,\n self.sparsity[0],\n self.scale[0],\n self.rng)\n W_em2 = self.init_fn[0](self.rank_n_approx,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em1 = theano.shared(W_em1,\n name='W1_0_%s'%self.name)\n self.W_em2 = theano.shared(W_em2,\n name='W2_0_%s'%self.name)\n self.W_ems = [self.W_em1, self.W_em2]\n\n else:\n W_em = self.init_fn[0](self.n_in,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em = theano.shared(W_em,\n name='W_0_%s'%self.name)\n self.W_ems = [self.W_em]\n\n self.b_em = theano.shared(\n self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),\n name='b_0_%s'%self.name)\n self.b_ems = [self.b_em]\n\n for dx in range(1, self.n_layers):\n W_em = self.init_fn[dx](self.n_hids[dx-1] / self.pieces[dx],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n self.rng)\n W_em = theano.shared(W_em, name='W_%d_%s'%(dx,self.name))\n self.W_ems += [W_em]\n\n b_em = theano.shared(\n self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),\n name='b_%d_%s'%(dx,self.name))\n self.b_ems += [b_em]\n\n self.params = [x for x in self.W_ems]\n\n if self.learn_bias and self.learn_bias!='last':\n self.params = [x for x in self.W_ems] + [x for x in self.b_ems]\n elif self.learn_bias == 'last':\n self.params = [x for x in self.W_ems] + [x for x in\n self.b_ems][:-1]\n self.params_grad_scale = [self._grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]\n self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]\n\n self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]", "def map_name_and_data(cls, onnx_model: onnx.ModelProto):\n params = {}\n for init in onnx_model.graph.initializer:\n params[init.name] = numpy_helper.to_array(init)\n for node in onnx_model.graph.node:\n # If two zero_points are identity, one is a reference to the other\n # after optimized by onnx.\n if node.op_type == 'Identity' and len(node.input) == 1 and \\\n node.input[0] in params:\n params[node.output[0]] = copy.deepcopy(params[node.input[0]])\n if node.op_type == 'Constant':\n for attr in node.attribute:\n if attr.name == 'value':\n params[node.output[0]] = numpy_helper.to_array(attr.t)\n return params", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis 
couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def pair_regression(): \r\n\r\n #get stats\r\n s_xx, s_yy, s_xy = getPairStats()\r\n \r\n #calculcate coefficients\r\n beta_hat = s_xy / s_xx\r\n alpa_hat = y_bar - beta_hat * x_bar\r\n \r\n return alpa_hat, beta_hat", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n 
self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def _core_init_params(self) :\n\t\ta_list,b_list = [],[]\n\t\tg_list,h_list = [],[]\n\t\t\n\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\th_eqn = self.initsol['h'][eqn-1]\n\t\t\tg_eqn = self.initsol['g'][eqn-1]\n\n\n\t\t\ta_list.append(self.initsol['alpha'][eqn-1])\n\t\t\tb_list.append(self.initsol['beta'][eqn-1])\n\t\t\t\n\t\t\tg_eqn = np.array([g_eqn[reg-1] for reg in reg_p])\n\t\t\th_eqn = np.array([h_eqn[reg-1] for reg in reg_d])\n\t\t\th_list.append(h_eqn)\n\t\t\tg_list.append(g_eqn)\n\t\n\t\treturn (a_list,b_list,g_list,h_list)", "def initialize_model_params():\n beta_0 = np.array([0., 0.])\n mu_0 = 0.\n return beta_0, mu_0", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def MILP_1(self,xData,uData):\n print \"\\n\",\"*\"*80,\"\\n\",\"MILP 1: Parameter Estimation\\n\",\"*\"*80 \n model=Model(\"parameters\")\n outflow={}\n d={}\n bigM=500\n Q_out={}\n Q_in={}\n N=max(l[1] for l in xData.keys())\n print \"x Data size is\",N\n N=max(l[1] for l in uData.keys())\n print \"u Data size is\",N\n for l in self.links:\n l.d=model.addVar(lb=0,ub=200,obj=0*l.type==\"road\") \n for t in range(1,N):\n d[l,t]=model.addVar(lb=0,ub=200,obj=1)\n for k in l.outgoing:\n outflow[l,k,t]=model.addVar(lb=0,ub=200)\n self.c[l,k]=model.addVar(lb=20,ub=200)\n self.beta[l,k]=model.addVar(lb=0.2,ub=0.8)\n self.alpha[l,k]=model.addVar(lb=0,ub=1)\n self.M[l,k]=model.addVar(lb=0,ub=200)\n d[\"outflow-1\",l,k,t]=model.addVar(vtype=GRB.BINARY) \n d[\"outflow-2\",l,k,t]=model.addVar(vtype=GRB.BINARY) \n model.update()\n for t in range(1,N):\n for l in self.links:\n if True:\n Q_out[l,t]=LinExpr()\n Q_in[l,t]=LinExpr()\n Q_out[l,t].addConstant(0)\n Q_in[l,t].addConstant(0)\n for k in l.outgoing:\n model.addConstr(outflow[l,k,t]<=self.beta[l,k]*uData[l,t]*xData[l,t])\n model.addConstr(outflow[l,k,t]<=self.M[l,k])\n 
model.addConstr(outflow[l,k,t]<=self.c[l,k]-self.alpha[l,k]*xData[k,t])\n model.addConstr(outflow[l,k,t]>=self.beta[l,k]*uData[l,t]*xData[l,t]+bigM*d[\"outflow-1\",l,k,t]-bigM)\n model.addConstr(outflow[l,k,t]>=self.M[l,k]+bigM*d[\"outflow-2\",l,k,t]-bigM)\n model.addConstr(outflow[l,k,t]>=self.c[l,k]-self.alpha[l,k]*xData[k,t]-bigM*d[\"outflow-1\",l,k,t]-bigM*d[\"outflow-2\",l,k,t])\n Q_out[l,t].add(outflow[l,k,t])\n for k in l.incoming:\n Q_in[l,t].add(outflow[k,l,t])\n if l.type==\"road\":\n model.addConstr(xData[l,t+1]<=xData[l,t]- Q_out[l,t] + Q_in[l,t] + d[l,t] + l.lambda_arrival) \n else:\n model.addConstr(xData[l,t+1]<=xData[l,t]- uData[l,t]*xData[l,t] + Q_in[l,t] + d[l,t] + l.lambda_arrival)\n for l in self.links:\n sum=LinExpr()\n for k in l.outgoing:\n sum.add(self.beta[l,k])\n model.addConstr(sum>=0)\n \n# J=QuadExpr()\n# for l in self.links:\n# for t in range(1,N):\n# if l.type==\"road\":\n# J.add(d[l,t]*d[l,t])\n# model.setObjective(J)\n model.optimize()\n for l in self.links:\n l.d=l.d.X\n for k in l.outgoing:\n self.beta[l,k]=self.beta[l,k].X\n self.c[l,k]=self.c[l,k].X\n self.alpha[l,k]=self.alpha[l,k].X\n self.M[l,k]=self.M[l,k].X\n for l in self.links:\n for t in range(1,N):\n l.d=max(d[l,t].X,l.d)\n \n \n\n \n if True:\n for t in range(1,N):\n print \"*\"*80,\"time=\",t\n for l in self.links:\n print \"\\n\",l,\"x is\",xData[l,t],\"u is\",uData[l,t],\"x+ is\",xData[l,t+1]\n for k in l.outgoing:\n print k,\"beta:\",self.beta[l,k],\"outflow\",outflow[l,k,t].X", "def asn_parameters(model='spk',**kwargs):\n\n pars = {'rhoe' : 6.5e-4,\n 'Ou' : 0.,\n 'Ku' : 100.,\n 'taue' : 1./60,\n 'Gtot' : 200., # MUST BE in [mM]\n 'Og' : 1.5,\n 'taug' : 30.,\n 'alpha': 0.5\n }\n pars = gu.merge_dicts(pars, gtrelease_parameters(),exocytosis_parameters())\n pars['ICs'] = np.asarray([0.,0.,0.05,0.99]) # [G_A,\\Gamma_S,c,h]\n pars['ICr'] = np.asarray([1,0.,0.,1.]) # [x_S,y_S,u_S,x_A]\n ## User-defined parameters\n pars = gu.varargin(pars, **kwargs)\n ## Takes only the first two elements of ICs in the MF model\n if model=='ave':\n pars['ICs'] = pars['ICs'][:2]\n if 'js' in kwargs:\n pars['js'] = kwargs['js']\n else:\n pars['js'] = pars['rhoe']*pars['Og']*1e3*pars['Gtot']*pars['taue']\n for k,item in pars.iteritems():\n if isscalar(item):\n pars[k] = float(item)\n else:\n pars[k] = array(item,dtype=float)\n # pars['Gtot'] *= 1e3 # Convert to [uM]\n return pars", "def assign_model_parameters(self,xmax,zmax,dh,duration):\n self.model_parameters['xmax']=xmax\n self.model_parameters['zmax']=zmax\n self.model_parameters['dh']=dh\n self.model_parameters['duration']=duration", "def get_iperparams(self):\n\t\treturn (self.D, self.K)", "def model_parameters(self) -> Iterator[Tuple[str, torch.Tensor]]:\n return self._model.named_parameters()", "def parameters(self):\n return {\n 'base':self.base.parameters(),\n 'material':[m.parameters() for m in self.material],\n 'fraction':self.fraction,\n }", "def get_model_params(self):\n\n results = self._model.fit()\n model_params = np.expand_dims(results.params.as_matrix(), 1)\n return model_params" ]
[ "0.60898656", "0.5972986", "0.59530956", "0.59530956", "0.5823548", "0.58039397", "0.5769897", "0.57277864", "0.57197624", "0.5719173", "0.56635916", "0.5620125", "0.56128556", "0.5603739", "0.5603092", "0.5579975", "0.5577034", "0.5563761", "0.5550069", "0.5526836", "0.55154026", "0.5512034", "0.5499642", "0.5480926", "0.54732466", "0.54642284", "0.54630834", "0.546222", "0.54434305", "0.5439691" ]
0.64936644
0
get chat ID and message text of most recent message sent to Bot
def get_last_chat_id_and_text(updates): num_updates = len(updates["result"]) last_update = (num_updates - 1) text = updates["result"][last_update]["message"]["text"] chat_id = updates["result"][last_update]["message"]["chat"]["id"] return text, chat_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_updates():\n url = TELEGRAM_URL + TELEGRAM_TOKEN + '/getUpdates'\n response = requests.get(url).json()\n last_object = response['result'][-1] # -1 = last update\n\n chat_id = last_object['message']['chat']['id']\n message_text = last_object['message']['text']\n message = {\n 'chat_id': chat_id,\n 'text': message_text\n }\n return message", "def get_recent_chat_messages(limit, round_id=None):\n query = _query_messages(round_id)\n messages = query[:limit]\n return [_make_message(msg, user_id, user_name)\n for msg, user_id, user_name in messages]", "def lastMessageReceived():", "def get(self):\n return {\"newest_msg\": newest_msg()}", "def calcLastMessageId(self): \n lastReadMessageId = self.lastReadMessageId # if stilll None read all from that global chat ( add field and add to this field )\n try:\n if(len(self.myMsgs()) > 0):\n last = self.myMsgs().last() \n if(last.globalMsg.id > self.lastReadMessageId): # in locals last id = 0 // \n lastReadMessageId = last.globalMsg.id\n except AttributeError as er:\n print('\\n')\n print('IntermediateLayerForMessaging . lastMessageId error in class ' + self.__class__+ ' and instance '+ self)\n print('\\n')\n print(er)\n print('\\n')\n except Exception as er:\n print('Wierd Unexpected Error')\n print(er)\n self.lastReadMessageId = lastReadMessageId\n self.save()", "def cmd_authed_dcc_getLatestMessage(self, c, e):\n MessageDict = self.__database.getLatestMessage(self.__getUserIdByDCCConnection(c))\n if MessageDict.has_key('from'):\n created = self.getFormattedDate(MessageDict['created'])\n c.privmsg(LATEST_MESSAGE_INTRO)\n c.privmsg(LATEST_MESSAGE_FROM % (MessageDict['from'], created))\n c.privmsg(LATEST_MESSAGE_SUBJECT % MessageDict['subject'])\n c.privmsg(LATEST_MESSAGE_BODY % MessageDict['body'])\n else:\n c.privmsg(NO_LATEST_MESSAGE)", "def _extract_chat_data(message):\n chat = message.chat\n chat_id, chat_type = chat.id, getattr(CHAT_TYPE_NAME, chat.type)\n user_or_group = chat.username if chat_type == CHAT_TYPE.PRIVATE else chat.title\n return chat_id, chat_type, user_or_group, chat.first_name, chat.last_name", "def get_last_conversations(self):\n email_token = auth.current_user()[0]\n user_data, last_messages = self.friend_database.get_conversations(email_token)\n last_messages = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in last_messages]\n for i in range(len(last_messages)):\n last_messages[i][\"timestamp\"] = last_messages[i][\"timestamp\"].isoformat()\n response = []\n for i in range(len(last_messages)):\n response.append({\"user\": user_data[i], \"last_message\": last_messages[i]})\n return json.dumps(response), 200", "def chat(request):\n team = Hunt.objects.get(is_current_hunt=True).team_from_user(request.user)\n if request.method == 'POST':\n # There is data in the post request, but we don't need anything but\n # the message because normal users can't send as staff or other teams\n m = Message.objects.create(time=timezone.now(), text=request.POST.get('message'),\n is_response=False, team=team)\n team.num_waiting_messages = 0\n messages = [m]\n else:\n if(team is None):\n return render(request, 'access_error.html', {'reason': \"team\"})\n if(team.hunt.is_locked and not team.is_playtester_team):\n return render(request, 'access_error.html', {'reason': \"hunt\"})\n if request.is_ajax():\n messages = Message.objects.filter(pk__gt=request.GET.get(\"last_pk\"))\n else:\n messages = Message.objects\n messages = messages.filter(team=team).order_by('time')\n\n # The whole message_dict format is for ajax/template 
uniformity\n rendered_messages = render_to_string('chat_messages.html',\n {'messages': messages, 'team_name': team.team_name})\n message_dict = {team.team_name: {'pk': team.pk, 'messages': rendered_messages}}\n try:\n last_pk = Message.objects.latest('id').id\n except Message.DoesNotExist:\n last_pk = 0\n team.num_waiting_messages = 0\n\n team.save() # Save last_*_message vars\n context = {'message_dict': message_dict, 'last_pk': last_pk}\n if request.is_ajax() or request.method == 'POST':\n return HttpResponse(json.dumps(context))\n else:\n context['team'] = team\n return render(request, 'chat.html', context)", "def when_last_chat_with(self, actor_label):\n query = read_query('trust/when_last_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['time']['value'].split('/')[-1] if response != [] else ''", "def get_messages(cls, max, since=None):\n logging.getLogger('app').debug('Inside Database.get_last_n_messages')\n \n with cls.__get_connection() as cnx:\n return cnx.fetch_all(\"\"\"\n select\n m.message_id,\n m.message_text,\n kestava.to_iso_8601_string(m.created_when) as created_when_formatted,\n m.ref_account_id,\n a.email\n from kestava.messages as m\n left join kestava.accounts as a on m.ref_account_id = a.account_id\n order by\n m.created_when desc,\n m.message_id desc\n limit %s;\"\"\",\n (max,))", "def get_message(self, message_id):\n req_data = [ str(message_id) ]\n return self.request(\"find:Message.stats, Message.content\", req_data)", "def get_chat_title(self, message=None, chat_id = None):\n \n if message is not None:\n data = {'chat_id': message.get('sender',{}).get('chat_id',None)}\n elif chat_id is not None:\n data = {'chat_id': chat_id}\n else:\n raise Exception('[ TDLibUtils.get_chat_title ]: Have to provide either message or chat_id')\n result = self._send_data('getChat',data)\n if result.update: \n return result.update.get('title','')", "def get_most_recent_url(channel_history):\n url = None\n for message in channel_history:\n # ignore all bot messages\n if 'bot_id' in message.keys():\n continue\n contained_urls = extract_urls(message['text'])\n if contained_urls:\n # get most recent URL\n url = contained_urls[-1]\n break\n if url is None:\n raise RuntimeError('I can\\'t find the URL to summarize!')\n return url[1:-1]", "def retrieve_message(channel, message_id):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.conversations_history(channel=channel, latest=message_id, inclusive=True, limit=1)\n assert response['ok'] is True\n return response\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response", "def get_last_message(self):\n self.driver_Lock.acquire() #acquire driver lock\n message=self.driver.find_elements_by_class_name(message_class)[-1].text #\n driver_Lock.release()\n return message", "def __chat_id_response(self) -> int:\n try:\n fetch_updates = self.__get_updates()\n return fetch_updates[0]['message']['chat']['id']\n except TimeoutError as tm_err:\n print(tm_err)\n sys.exit(1)", "def get_last_messages(self, count):\n return self.buffer.get_last(count)", "def chat(self):\n return self._get(\"chat\")", "def get_last_text_post(self):\n with self.__connection.cursor() as cursor:\n sql = \"\"\"SELECT * FROM `ow_newsfeed_action`\n WHERE `id`= (SELECT MAX(`id`) FROM `ow_newsfeed_action` WHERE `entityType`=\"user-status\")\n AND `entityType`=\"user-status\"\n \"\"\"\n cursor.execute(sql)\n 
response = cursor.fetchone()\n data = json.loads(response[\"data\"])[\"status\"]\n return data", "async def _get_timestamp(\n self,\n channel: TextChannel,\n msg_id: int,\n ) -> Optional[datetime]:\n try:\n msg = await channel.fetch_message(msg_id)\n except (NotFound, Forbidden, HTTPException) as err:\n self.logger.error(\"Error fetching message: %s\", err)\n msg = None\n\n return msg if msg is None else msg.created_at", "def get_message(self, mid):\n cursor = self.get_cursor()\n query = 'WITH like_count AS (SELECT mid, COUNT(*) AS likes ' \\\n 'FROM vote WHERE upvote = TRUE GROUP BY mid), ' \\\n 'dislike_count AS (SELECT mid, COUNT(*) AS dislikes ' \\\n 'FROM vote WHERE upvote = FALSE GROUP BY mid),' \\\n 'replies_query AS (SELECT replied_to, array_agg(mid) AS replies_list ' \\\n 'FROM replies INNER JOIN messages ON replies.reply = messages.mid ' \\\n 'GROUP BY replied_to) ' \\\n 'SELECT messages.mid, cid, message, image, COALESCE(likes, 0) AS likes, ' \\\n 'COALESCE(dislikes, 0) AS dislikes, username, ' \\\n \"COALESCE(replies_list, '{}') AS replies \" \\\n 'messages.created_on FROM messages ' \\\n 'LEFT OUTER JOIN like_count ON messages.mid = like_count.mid ' \\\n 'LEFT OUTER JOIN dislike_count ON messages.mid = dislike_count.mid ' \\\n 'LEFT OUTER JOIN photo ON messages.mid = photo.mid ' \\\n 'INNER JOIN users ON messages.uid = users.uid ' \\\n 'LEFT OUTER JOIN replies_query ON messages.mid = replies_query.replied_to ' \\\n 'WHERE messages.mid = %s ORDER BY messages.created_on DESC'\n cursor.execute(query, (mid,))\n messages = cursor.fetchall()\n return messages", "def last(self):\n if len(self._messages) == 0:\n return ''\n else:\n return self.format_message(self._messages[-1])", "def get_chat_message(\n self,\n chat_thread_id, # type: str\n chat_message_id, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.ChatMessage\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.ChatMessage\"]\n error_map = {\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-09-07\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_chat_message.metadata['url'] # type: ignore\n path_format_arguments = {\n 'endpoint': self._serialize.url(\"self._config.endpoint\", self._config.endpoint, 'str', skip_quote=True),\n 'chatThreadId': self._serialize.url(\"chat_thread_id\", chat_thread_id, 'str'),\n 'chatMessageId': self._serialize.url(\"chat_message_id\", chat_message_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n deserialized = self._deserialize('ChatMessage', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def _get_messages(self):\n try:\n messages = self.channel.get_messages(int(self.get_argument('since_timestamp', 0)))\n\n except ValueError as e:\n messages = self.channel.get_messages()\n\n return messages", "def get_longest_conv_by_frontend_id(\n message_id: str, api_client: ApiClient = Depends(deps.get_api_client), db: Session = Depends(deps.get_db)\n):\n pr = PromptRepository(db, api_client)\n message = pr.fetch_message_by_frontend_message_id(message_id)\n conv = pr.fetch_longest_conversation(message.message_tree_id)\n return utils.prepare_conversation(conv)", "def get_latest_email(M):\n data = search_email_by_time(M)\n if data is None:\n return\n print \"Access data succeed\"\n print \"Got data as \", data\n ids = data[0]\n id_list = ids.split()\n if len(id_list) > 0:\n latest_email_id = id_list[-1]\n # search unique id\n rv, data = M.uid('fetch', latest_email_id, \"(RFC822)\")\n if rv != \"OK\":\n print \"Error getting message\"\n return\n # here's the body, which is raw text of the whole email\n # including headers and alternate payloads\n raw_email = data[0][1]\n print \"raw_email is \", raw_email\n # print raw_email\n email_message = email.message_from_string(raw_email)\n print \"To: \", email_message['To'], \"\\n\"\n print \"From: \", email.utils.parseaddr(email_message['From']), \"\\n\"\n # print all headers\n # print email_message.items(), \"\\n\"\n\n # print the body text\n print 
get_first_text_block(email_message)", "async def get_project_recent_messages(self, workspace):\n await self.client.login(os.environ['DISCORD_BOT_TOKEN'], bot=self.is_bot)\n messages = []\n try:\n channel = await self.get_channel(workspace.project_channel_id)\n async for message in channel.history(limit=5):\n if message.author.bot:\n continue\n messages.append(message.content)\n except HTTPException as error:\n self.logger.critical(\n f\"discord {self.get_project_recent_messages.__name__} request failed for workspace {workspace.id} and raised error: {error.text} (code {error.code})\")\n\n await self.client.logout()\n return messages", "def get_most_recent(self):\n return self.unread()[:5]", "def get_overall_message(self):\r\n return self.overall_message" ]
[ "0.64326054", "0.63464683", "0.63145447", "0.62719935", "0.6155986", "0.6133849", "0.60792387", "0.6071617", "0.60530865", "0.6041852", "0.6027591", "0.60080546", "0.5998038", "0.5993367", "0.59932333", "0.59827834", "0.59706444", "0.59483796", "0.59210986", "0.5912884", "0.5833234", "0.5781361", "0.57717836", "0.5761609", "0.5729309", "0.57033694", "0.5701579", "0.5688216", "0.5681105", "0.56782895" ]
0.7100207
0
calculates highest update ID of all the updates we receive from getUpdates.
def get_last_update_id(updates): update_ids = [] for update in updates["result"]: update_ids.append(int(update["update_id"])) return max(update_ids)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calc_autoid(self):\n maxid = 0\n for i in self.items:\n maxid = max(maxid, i['id'])\n self._autoid = maxid + 1", "def update(self, ids=None):\n handlers = self.dm4l.get_handlers()\n if ids == None:\n ids = handlers.keys()\n assert(isinstance(ids, list))\n max_list = np.zeros(len(ids))\n arg_list = np.zeros(len(ids))\n for i, handler_id in enumerate(ids):\n y = handlers[handler_id].log_data[self.config['value_field']]\n max_list[i] = np.max(y)\n arg_list[i] = np.argmax(y)\n\n max = np.max(max_list)\n id = ids[np.argmax(max_list)]\n arg = arg_list[np.argmax(max_list)]\n if self.config['print']:\n res = [max, arg, id]\n print ' '.join([str(res[x]) for x in self.config['format']])\n return max, arg, id", "def _find_newest_update_by_location(updates: Iterable) -> Iterable:\n d = defaultdict(list)\n for update in updates:\n d[update[\"location\"]].append(update)\n\n for k, v in d.items():\n d[k] = max(v, key=lambda x: x[\"date\"])\n\n return d.values()", "def get_max_delta_id(self):\n result = self.query(sql.get_max_id_from(self.IDCOLNAME, self.delta_table_name))\n # If no events has been replayed, max would return a string 'None'\n # instead of a pythonic None. So we should treat 'None' as 0 here\n if result[0][\"max_id\"] == \"None\":\n return max(0, self.max_id_to_replay_upto_for_good2go)\n elif self.max_id_to_replay_upto_for_good2go != -1:\n return self.max_id_to_replay_upto_for_good2go\n\n return result[0][\"max_id\"]", "def get_max_feat_id(self):\n total_nb = 0\n try:\n with open(os.path.join(self.base_update_path,self.master_update_file),'rt') as master_file:\n # sum up sizes of files in master_file\n for line in master_file:\n statinfo = os.stat(os.path.join(self.hashing_outpath,line.strip()+'_itq_norm_'+str(self.bits_num)))\n total_nb += statinfo.st_size*8/self.bits_num\n except Exception as inst:\n print \"[HasherSwig.get_max_feat_id: error] {}\".format(inst)\n return total_nb", "def find_largest_id():\n max_id_val= 0\n for event in Event.query.all():\n if event.id > max_id_val:\n max_id_val = event.id\n return max_id_val", "def get_update_number( self ):", "def GetLatestIds(self, vals=[], default=None):\n ids = []\n \n for request in vals:\n partName, _, updateDate = request\n if updateDate:\n criteria=[('engineering_code', '=', partName), ('write_date', '>', updateDate)]\n else:\n criteria=[('engineering_code', '=', partName)]\n \n partIds = self.search(criteria, order='engineering_revision')\n if len(partIds) > 0:\n ids.append(partIds[len(partIds) - 1].id)\n return getCleanList(ids)", "def get_max_id(self):\r\n max_id = None\r\n for pid in self.players:\r\n if max_id is None or pid > max_id:\r\n max_id = pid\r\n return max_id", "def get_highest_id(self):\n\n return self.mint.get_highest_id()", "def maxid() -> int:\n pass", "def get_updates():\n global PREVIOUS_NEWEST_STR, UPDATED, WIKIDATA_WB_API, WIKIDATA_WB_PARAMS\n r = requests.get(url=WIKIDATA_WB_API, params=WIKIDATA_WB_PARAMS)\n root = etree.fromstring(r.text)\n seen = 0\n updates = []\n oldest_str = None\n newest_str = None\n for entry in root.iterchildren('{http://www.w3.org/2005/Atom}entry'):\n # print(etree.tostring(entry))\n q = entry.find('{http://www.w3.org/2005/Atom}title').text\n updated_str = entry.find('{http://www.w3.org/2005/Atom}updated').text\n if newest_str is None or updated_str > newest_str:\n newest_str = updated_str\n if oldest_str is None or updated_str < oldest_str:\n oldest_str = updated_str\n updated = dateutil.parser.parse(updated_str)\n if not re.match(r'''Q\\d+$''', q):\n # This is not an 
updated entity, ignore\n pass\n elif q in UPDATED and UPDATED[q] >= updated:\n # print(\"See %s update already\" % (q))\n seen += 1\n else:\n updates.append(q)\n # print(\"Got %s (updated at %s)\" % (q, updated))\n UPDATED[q] = updated\n print(\"%s: Got %d updates (ignored %d already seen)\" % (datetime.now(), len(updates), seen))\n if oldest_str > PREVIOUS_NEWEST_STR:\n print(\"WARNING: Gap between feed dates from %s to %s\" % (PREVIOUS_NEWEST_STR, oldest_str))\n PREVIOUS_NEWEST_STR = newest_str\n return updates", "def getNextUpdate(self):\n\n return self.get_POW().getNextUpdate()", "def getNextUpdate(self):\n\n return self.get_POW().getNextUpdate()", "def get_last_update_id(client):\r\n f = client.get_object(Bucket=,\r\n Key='last_update.json')['Body']\r\n # f is a StreamingBody object in json, load to retrieve id number\r\n return json.load(f)['id']", "def get_highest_seat_id(seat_ids):\n\n return max(seat_ids)", "def _find_latest_updates():\n query = (\n get_db()\n .collection_group(\"updates\")\n .order_by(\"date\", direction=firestore.Query.DESCENDING)\n )\n for doc_ref in query.stream():\n doc = doc_ref.to_dict()\n location = load_location(doc[\"location\"])\n yield {**location, **doc}", "def _get_last_read_id():\n webservice_url_initial = 'http://' + host_cmr + ':' + str(host_cmr_port) + '/rest/data/invocations/overview?latestReadId=' \\\n + str(MaxIdInDB[0])\n\n print('Web Service Url Initial for Last Read id is ', webservice_url_initial)\n response_summary = requests.get(webservice_url_initial)\n\n data = response_summary.json()\n df = pd.DataFrame(json_normalize(data))\n lastreadid_max = df[['id']].max()\n lastreadid_min = df[['id']].min()\n print('Last Read id VALUE in apm is ', lastreadid_max['id'])\n print('the min id VALUE in apm this json ', lastreadid_min['id'])\n\n if int(lastreadid_max) >= MaxIdInDB[0]:\n print(\"Send data to influx and MaxIDINDB[0] is from \", MaxIdInDB[0], ' to LastReadId:', int(lastreadid_max))\n a = lastreadid_max['id']\n print('a is ', a)\n return a\n time.sleep(1)", "def calcLastMessageId(self): \n lastReadMessageId = self.lastReadMessageId # if stilll None read all from that global chat ( add field and add to this field )\n try:\n if(len(self.myMsgs()) > 0):\n last = self.myMsgs().last() \n if(last.globalMsg.id > self.lastReadMessageId): # in locals last id = 0 // \n lastReadMessageId = last.globalMsg.id\n except AttributeError as er:\n print('\\n')\n print('IntermediateLayerForMessaging . 
lastMessageId error in class ' + self.__class__+ ' and instance '+ self)\n print('\\n')\n print(er)\n print('\\n')\n except Exception as er:\n print('Wierd Unexpected Error')\n print(er)\n self.lastReadMessageId = lastReadMessageId\n self.save()", "def get_updates(self):\n if update_queue:\n return update_queue.pop()", "def get_last_chat_id_and_text(updates):\r\n num_updates = len(updates[\"result\"])\r\n last_update = (num_updates - 1)\r\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\r\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\r\n return text, chat_id", "def getMaxId():\n root = fetchHtml(overviewUrl)\n rows = root.cssselect(\"div[class='listItemTitle'] span a\")\n max_id = 0\n \n for row in rows:\n m = re.search(\"puv_id=(\\d+)\", str(row.attrib['href']))\n id = int(m.group(1))\n max_id = max(max_id, id)\n return max_id", "def calculate_greatest(self):\n greatest = 0\n for resourceList in self.loading.values():\n for time, use in resourceList:\n if use > greatest:\n greatest = use\n self.emit(\"greatest_calculated\",greatest)\n return greatest", "def mod_max(self):\n phs = set()\n for k in self.get_fd_part_j(self.id):\n phs.add(self.get_prp_j(k)[0])\n if (1 in phs) and (2 not in phs) and (self.get_prp_j(self.id)[0] != max(phs)):\n self.all_seen = set()\n return max(phs)\n else:\n return self.get_prp_j(self.id)[0]", "def _find_updates(since: str):\n updates = _find_update_docs_since(since)\n newest_update_per_location = _find_newest_update_by_location(updates)\n logger.info(f\"Found {len(newest_update_per_location)} updates since {since}\")\n yield from _gen_updates_to_notify(newest_update_per_location)", "def update_DB(self, iterable, entry_columns, update):\n conn = self.conn\n bulk = []\n old_bulk = []\n list_of_id_db = list()\n list_of_id_atuais = self.lista_atual()\n list_of_id_afastados = self.lista_afastados()\n\n if update:\n list_of_id_db = conn.execute('SELECT id_parlamentar FROM {}.{}'.format(self.schema, self.table))\n list_of_id_db = [tup[0] for tup in list_of_id_db]\n id_row_historic = list(conn.execute('SELECT MAX(id) FROM {}.{}_historic'.format(self.schema, self.table)))[0][0]\n if not id_row_historic:\n id_row_historic = 0\n\n for senador in tqdm(iterable):\n entry = self.fill_entry_senador(senador,entry_columns)\n id_parlamentar = entry['id_parlamentar']\n\n if id_parlamentar in list_of_id_atuais:\n entry['situacao_parlamentar'] = 'atual'\n elif id_parlamentar in list_of_id_afastados:\n entry['situacao_parlamentar'] = 'afastado'\n\n if id_parlamentar in list_of_id_db:\n compare_columns = 'id_parlamentar, nome_completo, nome_parlamentar_atual, forma_tratamento, sexo_parlamentar, data_nascimento, data_falecimento, sigla_uf_origem, endereco_origem, nome_cidade_origem, codigo_estado_civil, endereco_congresso, fone, fax, website, email, profissao, id_camara, id_senado, cpf, titulo_de_eleitor, descricao_participacao'\n\n old_row = conn.execute(\"SELECT {} FROM {}.{} WHERE id_parlamentar='{}'\".format(compare_columns,self.schema, self.table,id_parlamentar))\n old_row = list(old_row)[0]\n new_row = tuple([entry[column] for column in compare_columns.split(', ')])\n\n if old_row != new_row:\n old_entry = copy.deepcopy(entry_columns)\n\n for key in old_entry.keys():\n old_date = conn.execute(\"SELECT {} FROM {}.{} WHERE id_parlamentar='{}'\".format(key,self.schema, self.table,id_parlamentar))\n old_entry[key] = list(old_date)[0][0]\n old_entry['change_date'] = datetime.datetime.today() #capture of change date\n id_row_historic += 
1\n old_entry['id'] = id_row_historic\n\n old_bulk.append(old_entry)\n conn.execute(\"DELETE FROM {}.{} WHERE id_parlamentar='{}'\".format(self.schema, self.table,id_parlamentar))\n\n bulk.append(entry)\n else:\n bulk.append(entry)\n\n if len(bulk) > 0:\n df = pd.DataFrame(bulk)\n df.set_index('id_parlamentar', inplace=True)\n print('Adding {} entries to SQL table {}.{}.'.format(len(df),self.schema, self.table))\n df.to_sql(self.table, con=self.conn, schema=self.schema, if_exists='append')\n\n if len(old_bulk) > 0:\n df2 = pd.DataFrame(old_bulk)\n df2.set_index('id_parlamentar', inplace=True)\n historic_table_name = self.table + '_historic'\n print('Adding {} entries to SQL table {}.{}.'.format(len(df2),self.schema, historic_table_name))\n df2.to_sql(historic_table_name, con=self.conn, schema=self.schema, if_exists='append')", "def _update_max(self):\n tmp = self\n while tmp.right is not None:\n tmp = tmp.right\n return tmp.parent.key", "def latest_id(self):\n return self.checkpoints[-1]", "def maxQualifiedIndex(self, indices):\n entry = self.getConfig()\n # the leader keep its own record updated to the newest\n indices[self.datacenter_id] = len(self.log) - 1\n # print('!!!!!', indices)\n if entry['config'] == 'single':\n return sorted([indices[x] for x in entry['data']])[(len(entry['data'])-1)/2]\n maxOld = sorted([indices[x] for x in entry['data'][0]])[(len(entry['data'][0])-1)/2]\n maxNew = sorted([indices[x] for x in entry['data'][1]])[(len(entry['data'][1])-1)/2]\n return min(maxOld, maxNew)", "def __find_max_price(self):\n prices_map = map(\n lambda iceberg: utils.get_actual_penguin_amount(\n self.__game, iceberg),\n self.__game.get_all_icebergs()\n )\n return max(prices_map)" ]
[ "0.6287191", "0.60834044", "0.6005314", "0.59129643", "0.56950474", "0.5664871", "0.55984014", "0.55454165", "0.5540562", "0.545689", "0.5405162", "0.54039353", "0.5351502", "0.5351502", "0.53288186", "0.5293417", "0.5273402", "0.5241545", "0.5217625", "0.5178107", "0.5147148", "0.5094075", "0.5071889", "0.5068997", "0.50604063", "0.5058304", "0.5052146", "0.50368196", "0.5031006", "0.50217164" ]
0.7646868
0
for data in projection axe.projection find and mask the overlaps (more than 1/2 the axe.projection range) X, Y either the coordinates in axe.projection or longitudes latitudes Z the data operation one of 'pcolor', 'pcolormesh', 'contour', 'contourf' if source_projection is a geodetic CRS data is in geodetic coordinates and should first be projected in axe.projection X, Y are 2D same dimension as Z for contour and contourf same dimension as Z or with an extra row and column for pcolor and pcolormesh return ptx, pty, Z
def z_masked_overlap(axe, X, Y, Z, source_projection=None): if not hasattr(axe, 'projection'): return Z if not isinstance(axe.projection, ccrs.Projection): return Z if len(X.shape) != 2 or len(Y.shape) != 2: return Z if (source_projection is not None and isinstance(source_projection, ccrs.Geodetic)): transformed_pts = axe.projection.transform_points( source_projection, X, Y) ptx, pty = transformed_pts[..., 0], transformed_pts[..., 1] else: ptx, pty = X, Y with np.errstate(invalid='ignore'): # diagonals have one less row and one less columns diagonal0_lengths = np.hypot( ptx[1:, 1:] - ptx[:-1, :-1], pty[1:, 1:] - pty[:-1, :-1] ) diagonal1_lengths = np.hypot( ptx[1:, :-1] - ptx[:-1, 1:], pty[1:, :-1] - pty[:-1, 1:] ) to_mask = ( (diagonal0_lengths > ( abs(axe.projection.x_limits[1] - axe.projection.x_limits[0])) / 2) | np.isnan(diagonal0_lengths) | (diagonal1_lengths > ( abs(axe.projection.x_limits[1] - axe.projection.x_limits[0])) / 2) | np.isnan(diagonal1_lengths) ) # TODO check if we need to do something about surrounding vertices # add one extra colum and row for contour and contourf if (to_mask.shape[0] == Z.shape[0] - 1 and to_mask.shape[1] == Z.shape[1] - 1): to_mask_extended = np.zeros(Z.shape, dtype=bool) to_mask_extended[:-1, :-1] = to_mask to_mask_extended[-1, :] = to_mask_extended[-2, :] to_mask_extended[:, -1] = to_mask_extended[:, -2] to_mask = to_mask_extended if np.any(to_mask): Z_mask = getattr(Z, 'mask', None) to_mask = to_mask if Z_mask is None else to_mask | Z_mask Z = np.ma.masked_where(to_mask, Z) return ptx, pty, Z
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getYesPoints(pshapes, proj, dx, nmax, touch_center=True):\n\n mxmin = 9e10\n mxmax = -9e10\n mymin = 9e10\n mymax = -9e10\n for pshape in pshapes:\n pxmin, pymin, pxmax, pymax = pshape.bounds\n if pxmin < mxmin:\n mxmin = pxmin\n if pxmax > mxmax:\n mxmax = pxmax\n if pymin < mymin:\n mymin = pymin\n if pymax > mymax:\n mymax = pymax\n\n if not touch_center:\n geodict = GeoDict.createDictFromBox(mxmin, mxmax, mymin, mymax, dx, dx)\n img = rasterizeShapes(pshapes, geodict)\n #now get the numpy array of x/y coordinates where covgrid == 1\n idx = np.where(img == 1)[0]\n x, y = np.unravel_index(idx, (geodict.ny, geodict.nx))\n yespoints = list(zip(x.flatten(), y.flatten()))\n nrows = geodict.ny\n ncols = geodict.nx\n xvar = np.arange(geodict.xmin, geodict.xmax+geodict.dx, geodict.dx)\n yvar = np.arange(geodict.ymin, geodict.ymax+geodict.dy, geodict.dy)\n else:\n xvar = np.arange(mxmin, mxmax+dx, dx)\n yvar = np.arange(mymin, mymax+dx, dx)\n ncols = len(xvar)\n nrows = len(yvar)\n if nmax is not None:\n if ncols*nrows > nmax:\n aspect = ncols/nrows\n ncols = np.sqrt(nmax*aspect)\n nrows = nmax/ncols\n ncols = int(ncols)\n nrows = int(nrows)\n #re-calculate dx here...\n tdx = (mxmax-mxmin)/ncols\n tdy = (mymax-mymin)/nrows\n dx = np.max(tdx, tdy)\n xvar = np.arange(mxmin, mxmax+dx, dx)\n yvar = np.arange(mymin, mymax+dx, dx)\n\n #Get the \"yes\" points to sample from\n yespoints = []\n idx = []\n shapeidx = 0\n if pshapes[0].type == 'Polygon':\n #loop over shapes, projecting each one, then get the sample points\n for pshape in pshapes:\n if not shapeidx % 1000:\n print('Searching polygon %i of %i' % (shapeidx, len(pshapes)))\n shapeidx += 1\n pxmin, pymin, pxmax, pymax = pshape.bounds\n leftcol = np.where((pxmin - xvar) >= 0)[0].argmax()\n rightcol = np.where((xvar - pxmax) >= 0)[0][0]\n bottomrow = np.where((pymin - yvar) >= 0)[0].argmax()\n toprow = np.where((yvar - pymax) >= 0)[0][0]\n xp = np.arange(xvar[leftcol], xvar[rightcol]+dx, dx)\n yp = np.arange(yvar[bottomrow], yvar[toprow]+dx, dx)\n xmesh, ymesh = np.meshgrid(xp, yp)\n xy = list(zip(xmesh.flatten(), ymesh.flatten()))\n for point in xy:\n ix = np.where(xvar == point[0])[0][0]\n iy = np.where(yvar == point[1])[0][0]\n if pshape.contains(Point(point)):\n yespoints.append(point)\n idx.append(np.ravel_multi_index((iy, ix), (nrows, ncols), mode='raise', order='C'))\n else:\n yespoints = []\n for pshape in pshapes:\n yespoints.append(pshape.coords[0])\n\n return (np.array(yespoints), nrows, ncols, xvar, yvar, idx)", "def do_intensity_projection(points, proj_W , proj_H, proj_fov_up, proj_fov_down, fn, idx):\n\n # print(points.shape)\n\n points = points[points.any(axis=1)]\n\n proj_range = np.zeros((proj_H, proj_W),\n dtype=np.float64)\n\n # unprojected range (list of depths for each point)\n unproj_range = np.zeros((0, 1), dtype=np.float32)\n\n # projected point cloud xyz - [H,W,3] xyz coord (-1 is no data)\n proj_xyz = np.full((proj_H, proj_W, 4), -1,\n dtype=np.float32)\n\n # projected remission - [H,W] intensity (-1 is no data)\n proj_remission = np.full((proj_H, proj_W), -1,\n dtype=np.float32)\n\n # projected index (for each pixel, what I am in the pointcloud)\n # [H,W] index (-1 is no data)\n proj_idx = np.full((proj_H, proj_W), -1,\n dtype=np.int32)\n\n # for each point, where it is in the range image\n proj_x = np.zeros((0, 1), dtype=np.int32) # [m, 1]: x\n proj_y = np.zeros((0, 1), dtype=np.int32) # [m, 1]: y\n\n # mask containing for each pixel, if it contains a point or not\n proj_mask = np.zeros((proj_H, proj_W),\n 
dtype=np.int32) # [H,W] mask\n\n\n\n\n # laser parameters\n fov_up = proj_fov_up / 180.0 * np.pi # field of view up in rad\n fov_down = proj_fov_down / 180.0 * np.pi # field of view down in rad\n fov = abs(fov_down) + abs(fov_up) # get field of view total in rad\n\n\n \n depth = np.linalg.norm(points[:,:3], 2, axis=1)\n\n # print(points[:10,:])\n \n\n # get scan components\n scan_x = points[:, 0]\n scan_y = points[:, 1]\n scan_z = points[:, 2]\n\n # get angles of all points\n yaw = -np.arctan2(scan_y, scan_x) \n pitch = np.arcsin(scan_z / depth)\n\n # get projections in image coords\n proj_x = 0.5 * (yaw / np.pi + 1.0) # in [0.0, 1.0]\n proj_y = 1.0 - (pitch + abs(fov_down)) / fov # in [0.0, 1.0]\n\n proj_x = np.nan_to_num(proj_x)\n\n proj_y = np.nan_to_num(proj_y)\n # scale to image size using angular resolution\n proj_x *= proj_W # in [0.0, W]\n proj_y *= proj_H # in [0.0, H]\n\n \n \n\n # round and clamp for use as index\n proj_x = np.floor(proj_x)\n proj_x = np.minimum(proj_W - 1, proj_x)\n proj_x = np.maximum(0, proj_x).astype(np.int32) # in [0,W-1]\n proj_x = np.copy(proj_x) # store a copy in orig order\n\n proj_y = np.floor(proj_y)\n proj_y = np.minimum(proj_H - 1, proj_y)\n proj_y = np.maximum(0, proj_y).astype(np.int32) # in [0,H-1]\n\n proj_y = np.copy(proj_y) # stope a copy in original order\n\n\n # # copy of depth in original order\n # unproj_range = np.copy(depth)\n\n # indices = np.arange(depth.shape[0])\n # order = np.argsort(depth)[::-1]\n # depth = depth[order]\n # indices = indices[order]\n # points = points[order]\n\n # proj_y = proj_y[order]\n # proj_x = proj_x[order]\n \n\n if DATASET_TYPE == \"kitti\":\n intensities = points[:,3]\n print(\"kitti\")\n # intensities = np.minimum(intensities, 1000)\n # i_min = intensities.min()\n # i_max = intensities.max()\n # intensities = (intensities - i_min)/(i_max - i_min)\n\n\n\n if DATASET_TYPE == \"mulran\" or DATASET_TYPE == \"mulran2\":\n intensities = points[:,3]\n intensities = np.minimum(intensities, 1000)\n i_min = intensities.min()\n i_max = intensities.max()\n \n intensities = (intensities - i_min)/(i_max - i_min)\n\n if DATASET_TYPE == \"dso\":\n \n \n \n intensities = points[:,4]\n \n\n minval = np.percentile(intensities, 2)\n maxval = np.percentile(intensities, 98)\n intensities = np.clip(intensities, minval, maxval)\n # intensities = np.maximum(intensities, 5000)\n # intensities = np.sqrt(intensities)\n\n \n\n\n \n i_min = intensities.min()\n i_max = intensities.max()\n\n intensities = (intensities - i_min)/(i_max - i_min)\n\n \n\n\n \n\n \n\n \n \n\n\n \n \n \n \n \n pixel_tracker = {}\n pc_tracker = {}\n # print(proj_x.shape)\n # print(scan_x.shape)\n\n \n proj_3d_corres = np.zeros((proj_H, proj_W, 3),\n dtype=np.float64)\n\n # print(proj_x[:20])\n # print(proj_y[:70])\n \n \n for i in range(proj_x.shape[0]):\n x_val = proj_x[i]\n y_val = proj_y[i]\n\n \n\n if proj_range[y_val, x_val] != 0:\n continue\n\n \n \n intensity = intensities[i]\n \n \n \n \n proj_range[y_val, x_val] = intensity\n \n proj_3d_corres[y_val,x_val, :] = np.array([scan_x[i], scan_y[i], scan_z[i]])\n \n\n\n \n proj_range *= 255\n\n\n \n \n \n \n proj_range = np.array(proj_range, dtype=np.uint8)\n\n \n newPicPath = None\n\n\n \n\n\n img = Image.fromarray(proj_range, 'L')\n pc_name = fn.split('.')[0]\n newPicPath = os.path.join(CURRENT_DIR, \"intensity_images\", \"mulran_\" + (str(idx)) + \".png\")\n img.save(newPicPath)\n\n\n return newPicPath, proj_3d_corres, proj_range", "def projectionManip(*args, fitBBox: bool=True, projType: int=0, 
switchType: bool=True, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass", "def test_project(self):\n import itertools\n from numpy import array, dot\n from numpy.linalg import det\n\n # our little magic constant\n magic = 0.33377777373737737777\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 1 + magic, -1 - magic):\n \n s = space(curvature=k)\n\n # test line preserving projection\n # 3 points are colinear when\n # | x1 y1 1 |\n # | x2 y2 1 | = 0\n # | x3 y3 1 |\n # let's test this!\n\n for p, q in itertools.permutations((\n (1, 0),\n (3/5, 4/5),\n (-5/13, 12/13),\n (-8/17, -15/17),\n ), 2):\n p = s.make_point(p, magic)\n q = s.make_point(q, magic)\n u = p.project(projection_types.preserve_lines)\n v = (p+q).project(projection_types.preserve_lines)\n w = (p+(-magic)*q).project(projection_types.preserve_lines)\n d = det([[*u, 1],[*v, 1],[*w, 1]])\n self.assertTrue(abs(d) < 1e-9)\n\n # test angle preserving projection\n # map will be conformal, so we do like a secant test\n\n delta = 1e-9\n vi = s.make_point((1, 0, 0), delta)\n vj = s.make_point((0, 1, 0), delta)\n vk = s.make_point((0, 0, 1), delta)\n for p in (\n (1, 0, 0),\n (0, 3/5, 4/5),\n (-5/13, 12/13, 0),\n (2/11, 6/11, 9/11),\n (3/7, 6/7, 2/7)\n ):\n p = s.make_point(p, magic)\n pp = p.project(projection_types.preserve_angles)\n pi, pj, pk = (array((p+v).project(projection_types.preserve_angles)) - pp for v in (vi, vj, vk))\n # should stay orthogonal and same size\n # note that we're doing a secant thing so it's only approximate\n # thus we set a relatively high tolerance\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pj, pj),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pk, pk),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pj),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pk),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pj, pk),\n 0,\n abs_tol = 1e-6\n ))", "def FindLocationCornerCoastGrid(landmask, coastgrids, fieldset, A = False):\n \n fU = fieldset.U\n \n \n \"\"\"\n If we're using this function for island A, we set A = True. Then the longitude matrix will be\n shorten one row. This is needed because the island coast grids are determined in four seperate parts.\n However, the following lines of code will determine the longitudes and latitudes of the given fieldset. These will\n be used to set the correct longitudes and latitudes to coastgrids.\n \"\"\"\n \n \n if A == False:\n lon = np.array(fU.lon[:]) \n lat = np.array(fU.lat[:])\n \n if A == True:\n lon = np.array(fU.lon[:]) \n lat = np.array(fU.lat[1:])\n \n \n \n \"\"\"\n Here we make the lon and lat matrix, which will be filled with the longitude and latitudes \n defined above. These lon lat matrices will have the same shape as the coastgrids mask.\n \"\"\"\n lon_matrix = np.zeros((landmask.shape[0], landmask.shape[1]))\n lat_matrix = np.zeros((landmask.shape[0], landmask.shape[1]))\n \n for i in range(landmask.shape[0]):\n lon_matrix[i,:] = lon\n \n for j in range(landmask.shape[1]):\n lat_matrix[:,j] = lat\n \n \"\"\"\n Finally, we will fill to list with the start longitude and latitudes. We're looping through\n the coastgrids mask and lon lat matrices, when a cell in the coastgrids mask is 1, we will append the corresponding lon and \n lat (from the lon lat matrix we're also looping throug) in the list. 
\n \"\"\"\n startlon_list = []\n startlat_list = []\n \n for i in range(landmask.shape[0]):\n for j in range(landmask.shape[1]):\n if coastgrids[i,j] == 1:\n startlon_list.append(lon_matrix[i,j])\n startlat_list.append(lat_matrix[i,j])\n \n\n return startlon_list, startlat_list", "def proj(self,lon,lat):\n x, y = self(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def _get_grid_cell_indexes(proj, xs, ys, bounding_box):\n # Unpack values from the projection\n eq_rad = proj.semi_major_axis\n polar_rad = proj.semi_minor_axis\n h = proj.perspective_point_height + eq_rad\n lon0 = proj.longitude_of_projection_origin\n \n # Unpack values from the area we want to grab the data\n min_lat, min_lon = bounding_box.sw_corner()\n max_lat, max_lon = bounding_box.ne_corner()\n \n with np.errstate(invalid='ignore'):\n # Calculate the lat and lon grids\n xs, ys = np.meshgrid(xs, ys)\n a_vals = np.power(np.sin(xs), 2.0) + \\\n np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \\\n eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))\n b_vals = -2 * h * np.cos(xs) * np.cos(ys)\n c_val = h * h - eq_rad * eq_rad\n \n rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)\n \n sx = rs * np.cos(xs) * np.cos(ys)\n sy = -rs * np.sin(xs)\n sz = rs * np.cos(xs) * np.sin(ys)\n \n lats = np.arctan((eq_rad *eq_rad * sz) \\\n / (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))\n lats = np.degrees(lats)\n \n lons = np.radians(lon0) - np.arctan(sy / (h - sx))\n lons = np.degrees(lons)\n \n # Flatten the arrays so we get a 1D list of indexes\n lats = lats.flatten()\n lons = lons.flatten()\n \n # Filter out values not in our bounding box\n lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]\n lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]\n idxs = list(set(lons).intersection(set(lats)))\n \n return idxs", "def safeProj(proj, lon, lat):\n x, y = proj(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y", "def test_rasters_created_with_projected_srs(self):\n\n # Create test data\n x_ul = 220534 # x value of upper left corner\n y_ul = 827790 # y_value of upper left corner\n numx = 8 # Number of xs\n numy = 5 # Number of ys\n dx = 200\n dy = -200\n\n # Define array where ys are rows and xs columns\n A1 = numpy.zeros((numy, numx))\n\n # Establish coordinates for lower left corner\n y_ll = y_ul - numy * dy\n x_ll = x_ul\n\n # Define pixel centers along each direction\n x = numpy.linspace(x_ll + 0.5, x_ll + numx - 0.5, numx)\n y = numpy.linspace(y_ll + 0.5, y_ll + numy - 0.5, numy)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numy):\n for j in range(numx):\n A1[numy - 1 - i, j] = linear_function(x[j], y[i])\n\n # Throw in a nodata element\n A1[2, 6] = numpy.nan\n\n # Upper left corner\n assert A1[0, 0] == linear_function(x[0], y[4])\n\n # Lower left corner\n assert A1[4, 0] == linear_function(x[0], y[0])\n\n # Upper right corner\n assert A1[0, 7] == linear_function(x[7], y[4])\n\n # Lower right corner\n 
assert A1[4, 7] == linear_function(x[7], y[0])\n\n # Generate raster object and write\n projection = \"\"\"PROJCS[\"DGN95 / Indonesia TM-3 zone 48.2\",\n GEOGCS[\"DGN95\",\n DATUM[\"Datum_Geodesi_Nasional_1995\",\n SPHEROID[\"WGS 84\",6378137,298.257223563,\n AUTHORITY[\"EPSG\",\"7030\"]],\n TOWGS84[0,0,0,0,0,0,0],\n AUTHORITY[\"EPSG\",\"6755\"]],\n PRIMEM[\"Greenwich\",0,\n AUTHORITY[\"EPSG\",\"8901\"]],\n UNIT[\"degree\",0.01745329251994328,\n AUTHORITY[\"EPSG\",\"9122\"]],\n AUTHORITY[\"EPSG\",\"4755\"]],\n UNIT[\"metre\",1,\n AUTHORITY[\"EPSG\",\"9001\"]],\n PROJECTION[\"Transverse_Mercator\"],\n PARAMETER[\"latitude_of_origin\",0],\n PARAMETER[\"central_meridian\",106.5],\n PARAMETER[\"scale_factor\",0.9999],\n PARAMETER[\"false_easting\",200000],\n PARAMETER[\"false_northing\",1500000],\n AUTHORITY[\"EPSG\",\"23834\"],\n AXIS[\"X\",EAST],\n AXIS[\"Y\",NORTH]]\"\"\"\n\n geotransform = (x_ul, dx, 0, y_ul, 0, dy)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testkwd': 'testval', 'size': 'small'})\n\n # Check string representation of raster class\n assert str(R1).startswith('Raster data')\n assert str(R1.rows) in str(R1)\n assert str(R1.columns) in str(R1)\n\n assert nanallclose(R1.get_data(), A1, rtol=1.0e-12)\n assert nanallclose(R1.get_geotransform(), geotransform,\n rtol=1.0e-12)\n assert 'DGN95' in R1.get_projection()", "def polyContourProjection(*args, caching: bool=True, constructionHistory: bool=True,\n createNewMap: bool=True, flipRails: bool=True, insertBeforeDeformers:\n bool=True, method: Union[int, bool]=0, name: AnyStr=\"\", nodeState:\n Union[int, bool]=0, offset0: Union[float, bool]=0.0, offset1:\n Union[float, bool]=0.0, offset2: Union[float, bool]=0.0, offset3:\n Union[float, bool]=0.0, reduceShear: Union[float, bool]=0.0,\n smoothness0: Union[float, bool]=0.0, smoothness1: Union[float,\n bool]=0.0, smoothness2: Union[float, bool]=0.0, smoothness3:\n Union[float, bool]=0.0, userDefinedCorners: bool=True, uvSetName:\n AnyStr=\"\", worldSpace: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def grid_spherical_decomposed(x, y, z, data, x_i, y_i, z_i, horz_res, missing_value=-32767):\n\n r_map = np.sqrt(x**2.0 + y**2.0) # cartesian radius from map (x,y) center\n az_map = np.arctan2(y,x) #azimuth in the cartesian system. might vary along a ray due to map projection curvature\n vcp = np.fromiter((np.median(az_map[:, i_az, :]) for i_az in range(az_map.shape[1])), np.float32)\n print x.shape\n \n r_i = np.arange(r_map.min(), r_map.max(), horz_res) # cartesian radius from map(x,y) center\n\n # also need to griddata the x, y, z geographic coordinates.\n # decomposed geometry in radar polar coordinates is a not a\n # geophysical coordinate system (it's really a tangent plane\n # coord sys without beam refraction effects), so really there \n # are two xyz systems in play here.\n\n # unless, if by using z and R = np.sqrt(x**2.0 + y**2.0), we remain in a cylinderical \n # system referenced to the map projection in use. 
I think this is true.\n\n # Interpolate from spherical to cylindrical.\n # Cylindrical system is a different\n # range coordinate than the radar range coordinate.\n az_idx = 1\n cyl_grid_shape = (r_i.shape[0], x.shape[az_idx], z_i.shape[0])\n cyl_grid = np.empty(cyl_grid_shape)\n \n for az_id in range(cyl_grid_shape[az_idx]):\n progress(az_id, cyl_grid_shape[az_idx], 'Gridding along azimuths')\n rhi_r = r_map[:, az_id, :]\n # rhi_y = y[:, az_id, :]\n # R_i = rhir = np.sqrt(x[:, az_id, :]**2.0 + y[:, az_id, :]**2.0)\n rhi_z = z[:, az_id, :]\n rhi_data = data[:, az_id, :]\n \n # input and output coordinates need to be taken from the same coordinate system\n cyl_grid[:, az_id, :] = griddata(rhi_r.flatten(), rhi_z.flatten(), rhi_data.flatten(), r_i, z_i).T\n print \"\\r\" + 'Gridding along azimuths ... done'\n # cyl_grid is r, az, z instead of r, az, el\n \n # get mesh of coordinates for all interpolated radii r_i and along the azimuth\n # since constant radar azimuth might have curvature induced by the map projection\n # it's tricky to do this.\n\n # steps:\n # Do new transform from r,az radar system to map system using r=r_i to get x,y\n # or \n # Just do naive assumption that azimuths are straight and accept the error (used this one)\n \n # interpolate from cylindrical to cartesian.\n grid = np.empty((len(x_i), len(y_i), len(z_i)), dtype=np.float32)\n for z_id in range(z_i.shape[0]):\n progress(z_id, z_i.shape[0], 'Gridding at constant altitude')\n cappi_x = r_i[:, None]*np.cos(vcp[None, :])\n cappi_y = r_i[:, None]*np.sin(vcp[None, :])\n cappi_data = cyl_grid[:,:,z_id]\n \n # input and output coordinates need to be taken from the same coordinate system\n grid_2d = griddata(cappi_x.flatten(), cappi_y.flatten(), cappi_data.flatten(), x_i, y_i).T\n grid[:, :, z_id] = grid_2d\n print \"\\r\" + 'Gridding at constant altitude ... 
done'\n \n grid[np.isnan(grid)] = missing_value\n \n return grid", "def define_projection(self, region):\n region = {\n \"start_longitude\": region[0],\n \"end_longitude\": region[1],\n \"start_latitude\": region[2],\n \"end_latitude\": region[3],\n }\n projection = \"LambertConformal\"\n plotextend = [\n region[\"start_longitude\"],\n region[\"end_longitude\"],\n region[\"start_latitude\"],\n region[\"end_latitude\"],\n ]\n if projection == \"LambertConformal\":\n # plotextend has to be a little larger so everything is on there\n plotextend = [\n plotextend[0] - 1.0,\n plotextend[1] + 1.0,\n plotextend[2] - 1.0,\n plotextend[3] + 1.0,\n ]\n # path to cut out is exact though\n lons = self.region_to_square(region, \"longitude\")\n lats = self.region_to_square(region, \"latitude\")\n path_ext = [[lon, lat] for lon, lat in zip(lons, lats)]\n path_ext = mpath.Path(path_ext).interpolated(20)\n # South Hemisfere\n if region[\"start_latitude\"] <= 0 and region[\"end_latitude\"] <= 0:\n proj = ccrs.LambertConformal(\n central_longitude=np.sum(plotextend[:2]) / 2.0,\n central_latitude=np.sum(plotextend[2:]) / 2.0,\n cutoff=+30,\n standard_parallels=(-33, -45),\n )\n # North Hemisphere\n else:\n proj = ccrs.LambertConformal(\n central_longitude=np.sum(plotextend[:2]) / 2.0,\n central_latitude=np.sum(plotextend[2:]) / 2.0,\n )\n return proj, path_ext, plotextend", "def _init_projection(self):\n radius = 6370e3\n \n # Spherical latlon used by WRF\n self.latlon_sphere = pyproj.Proj(proj='latlong',\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n # Lambert Conformal Conic used by WRF\n self.lambert_grid = pyproj.Proj(proj='lcc',\n lat_1=self.truelats[0],\n lat_2=self.truelats[1],\n lat_0=self.ref_latlon[0],\n lon_0=self.stand_lon,\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n grid_size_i = (self.domain_size[0] - 2) * self.cell_size[0]\n grid_size_j = (self.domain_size[1] - 2) * self.cell_size[1]\n\n grid_center_i, grid_center_j = pyproj.transform(\n self.latlon_sphere, self.lambert_grid,\n self.ref_latlon[1], self.ref_latlon[0])\n \n self.offset_i = grid_center_i - grid_size_i * .5\n self.offset_j = grid_center_j - grid_size_j * .5", "def test_projection_comparisons(self):\n\n # Although the two test datasets have the same projection,\n # this example failed with the message:\n # The reason was that comparison was done with get_projection()\n # rather than the projection objects themselves.\n\n #Projections must be the same: I got\n #GEOGCS[\"GCS_WGS_1984\",DATUM[\"WGS_1984\",\n # SPHEROID[\"WGS_1984\",6378137,298.257223563]],\n # PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.017453292519943295]] and\n #GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",\n # SPHEROID[\"WGS 84\",6378137,298.257223563,\n # AUTHORITY[\"EPSG\",\"7030\"]],TOWGS84[0,0,0,0,0,0,0],\n # AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,\n # AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.01745329251994328,\n # AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4326\"]]\n\n # Name file names for hazard level and exposure\n hazard_filename = ('%s/rw_jakarta_singlepart.shp' % TESTDATA)\n exposure_filename = ('%s/indonesia_highway_sample.shp' % TESTDATA)\n\n # Read\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n Hp = H.projection\n Ep = E.projection\n msg = 'Projections did not match: %s != %s' % (Hp, Ep)\n assert Hp == Ep, msg", "def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n \"projection_esp error:\"\n \" Equality set 
projection requires `cvxopt.glpk` to run.\")\n # Remove zero columns and rows\n nonzerorows = np.nonzero(\n np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n # Make sure origo is inside polytope\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print(\"Projecting from dim \" + str(d + k) + \" to \" + str(d))\n if k == 0:\n # Not projecting\n return C, bb, []\n if d == 1:\n # Projection to 1D\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \"\n \"LP returned status \" + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \" +\n \"LP returned status \" + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n # min, max\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n # Min case, relax constraint a little to avoid infeasibility\n E_min = unique_equalityset(\n C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n # Max case, relax constraint a little to avoid infeasibility\n E_max = unique_equalityset(\n C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.], [-1.]])\n g = np.array([x_max, -x_min])\n # Relocate\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero cols/rows\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print(\n \"Returning projection from dim \" +\n str(d + k) + \" to dim 1 \\n\")\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print(\"\\nStarting eq set \" + str(E_0) + \"\\nStarting ridges \")\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print(\"\\nLooking for neighbors to \" + str(rid_fac1.E_0) +\n \" and \" + str(rid_fac1.E_r) + \" ..\")\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print(\"found neighbor \" + str(E_adj) +\n \". 
\\n\\nLooking for ridges of neighbor..\")\n ridge_list = ridge(\n C, D, b, E_adj, a_adj, b_adj,\n abs_tol=abs_tol, verbose=verbose)\n if verbose > 0:\n print(\"found \" + str(len(ridge_list)) + \" ridges\\n\")\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print(\"Ridge \" + str(E_r) +\n \" already visited, removing from L..\")\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print(\"Adding ridge-facet \" + str(E_adj) +\n \" \" + str(E_r) + \"\")\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print(\"Expected ridge \" + str(rid_fac1.E_r))\n print(\"but got ridges \")\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n \"esp: ridge did not return neighboring ridge as expected\")\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n # Restore center\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero rows\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E", "def proj(self, X, G):\n raise NotImplementedError", "def CreateLandmask(Fieldset, test = False):\n \n \n \"\"\"\n This first set of lines creates a numpy array with u velocities and a numpy\n array with v velocities. First we get the U and V fields from the dataset. Then\n we compute a time chunk, which is needed because of the dataset. Then we only\n take the first slice of the U and V field (we do not need more for finding the land\n and ocean grids). As last we make an empty array which will be filled with zeros and \n ones.\n \"\"\"\n fU = Fieldset.U\n fV = Fieldset.V\n Fieldset.computeTimeChunk(fU.grid.time[0], 1) \n uvel_mask_c = fU.data[0,:,:] \n vvel_mask_c = fV.data[0,:,:]\n# vvel_mask_c = np.roll(vvel_mask_c, 1, axis = 0)\n landmask = np.zeros((uvel_mask_c.shape[0], uvel_mask_c.shape[1]))\n \n \"\"\"\n The first loop checks the value of the u and v velocitites. Notice that we get the\n values of two adjacent grid, since we're working with a C-grid.\n Visualizations of velocities in the C-grids(see below). So for a grid to be flagged identified\n as a land grid two U velocities and 2 V velocities need to be zero. The first loop makes all\n ocean grids 1 and land grids 0. \n ____ ____ ____ ____\n | V | V | \n | | | \n U T U T U\n | | | \n |____V____|_____V_____| \n \"\"\"\n \n for i in range (len(landmask[:,0])-1):\n for j in range (len(landmask[0,:])-1):\n u1 = uvel_mask_c[i,j]\n\n u2 = uvel_mask_c[i,j+1]\n\n v1 = vvel_mask_c[i,j]\n\n v2 = vvel_mask_c[i+1,j]\n\n if u1 != 0 or u2 != 0 or v1 != 0 or v2 != 0:\n landmask[i,j] = 1\n \n \n \"\"\"\n Change all zero to 1 and rest 0. since we want the land grids to be 1 and ocean\n grids to be 0. \n \"\"\"\n \n landmask = ChangeValues(landmask,0,1) \n \n \"\"\"\n The created landmask needs to be shifted upwards one grid. 
We will\n use the numpy roll function to do this.\n \"\"\"\n \n if test == True:\n plt.figure()\n plt.imshow(landmask)\n plt.colorbar()\n \n return landmask", "def grid_results(infile, resolution = 0.01, clip_shp = None, \n overwrite=True, contour=False):\n outfile = infile.rstrip('().csv') + '_gridded.tif'\n # if not overwrite:\n if os.path.isfile(outfile):\n if not overwrite:\n print('Not creating file %s as already exists' % outfile)\n print('To re-create file (e.g if inputs changed) set overwrite=True)')\n return\n else:\n try:\n os.remove(outfile)\n os.remove((outfile.rstrip('.tif') + '_clip.tif'))\n except:\n pass\n data = np.genfromtxt(infile, delimiter=',')\n max_lon = max(data[:,0])\n min_lon = min(data[:,0])\n max_lat = max(data[:,1])\n min_lat = min(data[:,1])\n #print max_lon, min_lon, max_lat, min_lat\n xi = np.arange(min_lon, max_lon, resolution)\n yi = np.arange(min_lat, max_lat, resolution)\n XI,YI = np.meshgrid(xi,yi)\n xsize = len(xi)\n ysize = len(yi)\n\n print('Interpolating results')\n gridded_results = griddata((data[:,0],data[:,1]),data[:,2],(XI,YI),method='linear')\n #print gridded_results\n #outfile = infile.rstrip('().csv') + '_gridded.tif'\n print('Writing gridded data to %s' % outfile)\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(outfile, xsize, ysize, 1, gdal.GDT_Float32)\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection(srs.ExportToWkt())\n gt = [(min_lon - (resolution/2)), resolution, 0, \n (min_lat - (resolution/2)), 0, resolution]\n ds.SetGeoTransform(gt)\n outband=ds.GetRasterBand(1)\n outband.SetStatistics(np.min(gridded_results), np.max(gridded_results), np.average(gridded_results), np.std(gridded_results))\n outband.WriteArray(gridded_results)\n # Need to close output dataset before we can do clipping\n ds = None\n # now clip by shapefile\n if clip_shp is not None:\n clipfile = outfile.rstrip('.tif') + '_clip.tif'\n cmd = ['gdalwarp',\n '-cutline',\n clip_shp,\n '-crop_to_cutline',\n '-dstalpha',\n outfile,\n clipfile]\n print(cmd)\n call(cmd, shell=False)\n if contour is True:\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (outfile, outfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (clipfile, clipfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)", "def __nc_geo_bounds(self, extent, data_sel):\n indx = None\n if extent is not None:\n if len(extent) != 4:\n raise ValueError('parameter extent must have 4 elements')\n\n lats = self.fid['/instrument/latitude_center'][:].reshape(\n self.scanline, self.ground_pixel)\n lons = self.fid['/instrument/longitude_center'][:].reshape(\n self.scanline, self.ground_pixel)\n\n indx = np.where((lons >= extent[0]) & (lons <= extent[1])\n & (lats >= extent[2]) & (lats <= extent[3]))\n data_sel = np.s_[indx[0].min():indx[0].max(),\n indx[1].min():indx[1].max()]\n\n gid = self.fid['/instrument']\n lat_bounds = gid['latitude_corners'][:].data.reshape(\n self.scanline, self.ground_pixel, 4)\n lon_bounds = gid['longitude_corners'][:].data.reshape(\n self.scanline, self.ground_pixel, 4)\n if data_sel is not None:\n lat_bounds = lat_bounds[data_sel + (slice(None),)]\n lon_bounds = lon_bounds[data_sel + (slice(None),)]\n\n return (data_sel, lon_bounds, lat_bounds)", "def _deproject(self, x0, y0, inc, PA, z_func=None, frame='cylindrical'):\n if frame.lower() not in ['cylindrical', 'cartesian']:\n raise ValueError(\"frame must be 'cylindrical' or 'cartesian'.\")\n if z_func is None:\n r, t = 
self._get_midplane_polar_coords(x0, y0, inc, PA)\n z = np.zeros(r.shape)\n else:\n r, t, z = self._get_flared_coords(x0, y0, inc, PA, z_func)\n if frame == 'cylindrical':\n return r, t, z\n return r * np.cos(t), r * np.sin(t), z", "def projectionX(xdata, ydata, nbins, xrange=None, yrange=None):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(xmin, xmax, nbins+1)\n y_out = np.empty(nbins)\n dx = np.diff(x_out)[0]\n\n selection = in_range(xdata, xmin, xmax) & in_range(ydata, ymin, ymax)\n xdata, ydata = xdata[selection], ydata[selection]\n for i in range(nbins):\n bin_data = np.extract(in_range(xdata, x_out[i], x_out[i+1]), ydata)\n y_out[i] = bin_data.size\n x_out += dx / 2.\n x_out = x_out[:-1]\n return x_out, y_out", "def extract_region_curvilinear(cube, lat_bounds):\n\n cube = cube.copy() \n \n region_mask = create_region_mask(cube.coord('latitude').points, cube.shape, lat_bounds)\n land_ocean_mask = cube.data.mask\n complete_mask = region_mask + land_ocean_mask\n\n cube.data = numpy.ma.asarray(cube.data)\n cube.data.mask = complete_mask\n\n return cube", "def _get_projection(cls, obj):\n isoverlay = lambda x: isinstance(x, CompositeOverlay)\n opts = cls._traverse_options(obj, 'plot', ['projection'],\n [CompositeOverlay, Element],\n keyfn=isoverlay)\n from_overlay = not all(p is None for p in opts[True]['projection'])\n projections = opts[from_overlay]['projection']\n custom_projs = [p for p in projections if p is not None]\n if len(set(custom_projs)) > 1:\n raise Exception(\"An axis may only be assigned one projection type\")\n return custom_projs[0] if custom_projs else None", "def polyProjection(*args, constructionHistory: bool=True, createNewMap: bool=True,\n imageCenterX: float=0.5, imageCenterY: float=0.5, imageScaleU: float=1,\n imageScaleV: float=1, insertBeforeDeformers: bool=True, keepImageRatio:\n bool=True, mapDirection: AnyStr=\"\", projectionCenterX: float=0.0,\n projectionCenterY: float=0.0, projectionCenterZ: float=0.0,\n projectionScaleU: float=0.0, projectionScaleV: float=0.0, rotateX: float=0.0,\n rotateY: float=0.0, rotateZ: float=0.0, rotationAngle: float=0, seamCorrect:\n bool=True, smartFit: bool=True, type: AnyStr=\"\", uvSetName: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass", "def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n 
left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;", "def Projected(InputFilePath,OutputFilePath): # perform a reproject raster onto a DEM and return\r\n try:\r\n print(\"\"\"\r\nReprojecting Raster...\r\n \"\"\") \r\n \r\n arcpy.ProjectRaster_management(in_raster=InputFilePath,out_raster=OutputFilePath,out_coor_system=\"PROJCS['NAD_1983_StatePlane_California_III_FIPS_0403',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',2000000.0],PARAMETER['False_Northing',500000.0],PARAMETER['Central_Meridian',-120.5],PARAMETER['Standard_Parallel_1',37.06666666666667],PARAMETER['Standard_Parallel_2',38.43333333333333],PARAMETER['Latitude_Of_Origin',36.5],UNIT['Meter',1.0]]\",resampling_type=\"NEAREST\",cell_size=\"27.8165597364916 27.8165597364914\",geographic_transform=\"WGS_1984_(ITRF00)_To_NAD_1983\",Registration_Point=\"#\",in_coor_system=\"GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]\")\r\n print(\"Complete\")\r\n \r\n except Exception, err: # an error occurred (probably in arcGIS)\r\n raise RuntimeError(\"** Error: Project Raster Failed (\"+str(err)+\")\")", "def Projection(W, TYPE_PROJ = proj_l11ball, ETA = 100, AXIS = 0, ETA_STAR = 100, device = \"cpu\" ): \n \n #global TYPE_PROJ, ETA, ETA_STAR, AXIS, device \n if TYPE_PROJ == 'No_proj':\n W_new = W\n if (TYPE_PROJ == proj_l1ball or TYPE_PROJ == proj_l11ball or TYPE_PROJ == proj_l11ball_line ):\n W_new = TYPE_PROJ(W, ETA, device)\n if TYPE_PROJ == proj_l21ball or TYPE_PROJ == proj_l12ball:\n W_new = TYPE_PROJ(W, ETA, AXIS, device = device)\n if TYPE_PROJ == proj_nuclear:\n W_new = TYPE_PROJ(W, ETA_STAR, device=device)\n return W_new", "def _make_mask(data, mask_bounds):\n # For each set of bounds add to the conditional.\n mask = False\n for lat_bounds, lon_bounds in mask_bounds:\n mask |= _add_to_mask(data, lat_bounds, lon_bounds)\n return mask", "def __call__(self):\n #Check source type and sensor type, then call appripriate methods to \n #generate intermediate data, cascading all the way back to geometry \n #calculation if it wasn't already done.\n #Then return a projection matrix...\n\n # NOTE: returned projection_matrix is a numpy.ndarray\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n projection_matrix = self.eeg_gain()\n elif isinstance(self.sensors, sensors_module.SensorsMEG):\n projection_matrix = self.meg_gain()\n\n return projection_matrix" ]
[ "0.57300425", "0.5697506", "0.56191784", "0.55972964", "0.55415964", "0.55406713", "0.552754", "0.55060947", "0.5489971", "0.5464598", "0.53949124", "0.5393436", "0.5373377", "0.53695416", "0.53547066", "0.53487754", "0.5304495", "0.5297561", "0.5294725", "0.5246394", "0.5229677", "0.5226064", "0.5211097", "0.5187981", "0.51643115", "0.5162225", "0.51584226", "0.51430637", "0.5129771", "0.51279396" ]
0.6373255
0
Ingest a packet and put the flow object into the context of the flow that the packet belongs to.
def ingest_packet(self, pkt, pkt_receive_timestamp): #*** Packet length on the wire: self.packet_length = len(pkt) #*** Read into dpkt: eth = dpkt.ethernet.Ethernet(pkt) eth_src = _mac_addr(eth.src) eth_dst = _mac_addr(eth.dst) eth_type = eth.type #*** We only support IPv4 (TBD: add IPv6 support): if eth_type != 2048: self.logger.error("Non IPv4 packet, eth_type is %s", eth_type) return 0 ip = eth.data self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src) self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst) #*** We only support TCP: if ip.p != 6: self.logger.error("Non TCP packet, ip_proto=%s", ip.p) return 0 proto = 'tcp' tcp = ip.data self.tcp_src = tcp.sport self.tcp_dst = tcp.dport self.tcp_seq = tcp.seq self.tcp_acq = tcp.ack self.tcp_flags = tcp.flags self.payload = tcp.data #*** Generate a hash unique to flow for packets in either direction self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src, self.tcp_dst, proto) #*** Check to see if we already know this identity: db_data = {'hash': self.fcip_hash} self.fcip_doc = self.fcip.find_one(db_data) if not self.fcip_doc: #*** Get flow direction (which way is TCP initiated). Client is #*** the end that sends the initial TCP SYN: if _is_tcp_syn(tcp.flags): self.logger.debug("Matched TCP SYN first pkt, src_ip=%s", self.ip_src) self.client = self.ip_src self.server = self.ip_dst self.packet_direction = 'c2s' self.verified_direction = 'verified-SYN' elif _is_tcp_synack(tcp.flags): self.logger.debug("Matched TCP SYN+ACK first pkt, src_ip=%s", self.ip_src) self.client = self.ip_dst self.server = self.ip_src self.packet_direction = 's2c' self.verified_direction = 'verified-SYNACK' else: self.logger.debug("Unmatch state first pkt, tcp_flags=%s", tcp.flags) self.client = self.ip_src self.server = self.ip_dst self.packet_direction = 'c2s' self.verified_direction = 0 #*** Neither direction found, so add to FCIP database: self.fcip_doc = {'hash': self.fcip_hash, 'ip_A': self.ip_src, 'ip_B': self.ip_dst, 'port_A': self.tcp_src, 'port_B': self.tcp_dst, 'proto': proto, 'finalised': 0, 'packet_count': 1, 'latest_timestamp' : pkt_receive_timestamp, 'packet_timestamps': [pkt_receive_timestamp,], 'tcp_flags': [tcp.flags,], 'packet_lengths': [self.packet_length,], 'client': self.client, 'server': self.server, 'packet_directions': [self.packet_direction,], 'verified_direction': self.verified_direction, 'suppressed': 0} self.logger.debug("FCIP: Adding record for %s to DB", self.fcip_doc) db_result = self.fcip.insert_one(self.fcip_doc) self.packet_count = 1 elif self.fcip_doc['finalised']: #*** The flow is already finalised just increment packet count: self.fcip_doc['packet_count'] += 1 #*** Write updated FCIP data back to database: db_result = self.fcip.update_one({'hash': self.fcip_hash}, {'$set': {'packet_count': self.fcip_doc['packet_count']},}) self.packet_count = self.fcip_doc['packet_count'] else: #*** We've found the flow in the FCIP database, now update it: self.logger.debug("FCIP: found existing record %s", self.fcip_doc) #*** Rate this packet as c2s or s2c direction: if self.client == self.ip_src: self.packet_direction = 'c2s' elif self.client == self.ip_dst: self.packet_direction = 's2c' else: self.packet_direction = 'unknown' #*** Increment packet count. 
Is it at max?: self.fcip_doc['packet_count'] += 1 self.packet_count = self.fcip_doc['packet_count'] if self.fcip_doc['packet_count'] >= self.max_packet_count: #*** TBD: self.fcip_doc['finalised'] = 1 self.logger.debug("Finalising...") #*** Read suppressed status to variable: self.suppressed = self.fcip_doc['suppressed'] #*** Read verified_direction status to variable: self.verified_direction = self.fcip_doc['verified_direction'] #*** Add packet timestamps, tcp flags etc: self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp) self.fcip_doc['tcp_flags'].append(tcp.flags) self.fcip_doc['packet_lengths'].append(self.packet_length) self.fcip_doc['packet_directions'].append(self.packet_direction) #*** Write updated FCIP data back to database: db_result = self.fcip.update_one({'hash': self.fcip_hash}, {'$set': {'packet_count': self.fcip_doc['packet_count'], 'finalised': self.fcip_doc['finalised'], 'packet_timestamps': self.fcip_doc['packet_timestamps'], 'tcp_flags': self.fcip_doc['tcp_flags'], 'packet_lengths': self.fcip_doc['packet_lengths'], 'packet_directions': self.fcip_doc['packet_directions'] },}) #*** Tests: self.logger.debug("max_packet_size is %s", self.max_packet_size()) self.logger.debug("max_interpacket_interval is %s", self.max_interpacket_interval()) self.logger.debug("min_interpacket_interval is %s", self.min_interpacket_interval())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, packet):\n pass", "def add(self, packet):\n self.fin_cleanse(packet['ts'])\n\n source_key = '%s-%s' % (packet['ip']['src_addr'], \n packet['tcp']['src_port'])\n # If start of handshake create new conversation\n if packet['tcp']['flags']['SYN'] and not packet['tcp']['flags']['ACK']:\n convo = self.begin_convo(source_key, packet)\n # If not, then this should be part of an existing conversation\n else:\n destination_key = '%s-%s' % (packet['ip']['dst_addr'],\n packet['tcp']['dst_port']) \n # Try to find an appropriate conversation and packet handler\n\n if source_key in self.conv: # Client -> Server\n convo = self.conv[source_key]\n handler = self.handle_in_bound\n elif destination_key in self.conv: # Server -> Client\n convo = self.conv[destination_key]\n handler = self.handle_out_bound\n else:\n # Ignore this packet because I don't know\n # of any such conversation. TODO something?\n return\n # deal with the packet.\n self.update_conversation(convo, packet)\n handler(convo, packet)", "def _handle_PacketIn(self, event):\n msg = of.ofp_packet_out()\n msg.data = event.ofp\n msg.in_port = event.port\n msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))\n event.connection.send(msg)", "def process(self, pkt):\n pass", "def _handle_PacketIn (self, event):\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n \n self.do_firewall(packet, packet_in, event)", "def process_packet(self, in_port, packet):\n \n buf = bytearray(packet)\n for idx in range((len(packet) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n if self.disabled:\n logging.debug(\"Switch is disabled; discarding packet\")\n return\n\n parsed_packet = ParsedPacket(buf, self.metadata)\n logging.debug(\"Processing packet %d from port %d with %s\" % \n (parsed_packet.id, in_port,\n self.first_processor.name))\n self.first_processor.process(parsed_packet)", "def handle_packet(cls, packet: scapypacket):\n pass", "def flow(self, flow):\n\n self._flow = flow", "def _handle_PacketIn (self, event):\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n self.do_final(packet, packet_in, event.port, event.dpid)", "def _push_packet(self, packet):\n self._read_queue.append((decode(packet), packet))\n\n if self._read_waiter is not None:\n w, self._read_waiter = self._read_waiter, None\n w.set_result(None)", "def _apply_flow_layer(self, input, context, flow_step, is_training):\n raise NotImplementedError", "def give_packet(self, packet, verbose=False, cache=False, tunnel=None, source_sock_addr=None):\n assert isinstance(packet, str)\n assert isinstance(verbose, bool)\n assert isinstance(cache, bool)\n assert tunnel is None, \"TUNNEL property is set using init_socket(...)\"\n assert source_sock_addr is None or isinstance(source_sock_addr, tuple), type(source_sock_addr)\n if verbose:\n logger.debug(\"giving %d bytes\", len(packet))\n if source_sock_addr is None:\n source_sock_addr = self.lan_address\n candidate = Candidate(source_sock_addr, self._tunnel)\n self._dispersy.on_incoming_packets([(candidate, packet)], cache=cache, timestamp=time())\n return packet", "def update_flow(self, flow):\r\n self.flow = flow", "def _add_pkt_into_tcp_stream(self, pcap_packet, num):\n \n # the src is server, remote(dst) is client\n if 
(pcap_packet.ip.dst == _device_ip): # HUA use ip (not 80 port) as direction judgement\n server_addr = pcap_packet.ip.src\n server_port = pcap_packet.tcp.src_port\n client_addr = pcap_packet.ip.dst\n client_port = pcap_packet.tcp.dst_port\n else:\n server_addr = pcap_packet.ip.dst\n server_port = pcap_packet.tcp.dst_port\n client_addr = pcap_packet.ip.src\n client_port = pcap_packet.tcp.src_port\n socket_tuple = (client_addr, client_port, server_addr, server_port)\n if (socket_tuple not in self.tcp_stream_container):\n self.tcp_stream_container[socket_tuple] = Tcp_stream()\n pcap_packet.tcp.stream_index = self.tcp_stream_container[socket_tuple].stream_index\n self.tcp_stream_container[socket_tuple].pcap_num_list.append(num)", "def push_exg(self, packet):\n _, exg_data = packet.get_data(self.exg_fs)\n self.exg_outlet.push_chunk(exg_data.T.tolist())", "def _handle_PacketIn(self, event):\n\n packet = event.parsed # Packet is the original L2 packet sent by the switch\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n # ignore lldp packets\n if event.parsed.type == ethernet.LLDP_TYPE:\n return\n # act like switch\n packet_in = event.ofp # packet_in is the OpenFlow packet sent by the switch\n self.act_like_switch(packet, packet_in)", "def send(self, packet):\n self._loop.create_task(self.send_coro(packet))", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n return", "def packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n port = msg.match['in_port']\n gateway = self.gateway_get(datapath.id)\n\n if gateway is None:# or gateway.idc_id != CONF.idc_id:\n return\n\n pkt = packet.Packet(msg.data)\n pkt_ethernet = pkt.get_protocol(ethernet.ethernet)\n\n if not pkt_ethernet:\n LOG.info(_LI(\"drop non-ethernet packet\"))\n return\n\n pkt_arp = pkt.get_protocol(arp.arp)\n pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)\n\n if pkt_arp:\n self.packet_arp.run(msg, pkt_ethernet, pkt_arp, gateway)\n elif pkt_ipv4:\n pkt_tp = pkt.get_protocol(tcp.tcp) or \\\n pkt.get_protocol(udp.udp) or \\\n pkt.get_protocol(icmp.icmp)\n\n if pkt.get_protocol(icmp.icmp):\n LOG.error(\"packet-in msg %s %s %s from %s\", datapath.id, pkt_ipv4, pkt_tp, port)\n LOG.debug(\"packet-in msg %s %s %s from %s\", \n datapath.id, pkt_ipv4, pkt_tp, port)\n\n if pkt_tp and port:\n self.packet_ipv4.run(msg, pkt_ethernet, pkt_ipv4, pkt_tp, gateway)\n else:\n LOG.debug(_LI(\"drop non-arp and non-ip packet\"))", "def deploy_flow_entry(self, subnet, outport, dstport):\n if outport is None:\n logger.warning('fail to deploy flow entry, cant find output port for %s', str(subnet))\n return\n\n # match by destination IP address\n match = ofctl_v1_0.to_match(self.dp, {'nw_dst': str(subnet), 'dl_type': '2048', 'nw_proto': '1'})\n \n # rewrite source MAC address with gateway's MAC address\n # rewrite destination MAC address with host's MAC address\n # set output port\n actions = []\n actions.append(self.dp.ofproto_parser.OFPActionSetDlSrc(outport.hw_addr.packed))\n actions.append(self.dp.ofproto_parser.OFPActionSetDlDst(dstport.hw_addr.packed))\n actions.append(self.dp.ofproto_parser.OFPActionOutput(outport.port_no))\n\n mod = self.dp.ofproto_parser.OFPFlowMod(\n datapath = self.dp, match = match,\n priority = 1, cookie = 0, actions = actions,\n idle_timeout = FLOW_IDLE_TIMEOUT,\n hard_timeout = FLOW_HARD_TIMEOUT,\n command = self.dp.ofproto.OFPFC_MODIFY)\n\n # send FlowMod\n self.dp.send_msg(mod)", "def handle_packet(self, packet):\n if self.compression:\n compression_len, packet = 
ParseVarInt(packet, consume=True)\n\n # if we have compressed data decompress it\n if compression_len != 0:\n packet = zlib.decompress(bytearray(packet))\n\n packet_id, packet = ParseVarInt(packet, consume=True)\n try:\n packet_id = str(self.state(packet_id))\n except ValueError:\n # print(\"Unknown packet ID %s for state %s\" % (hex(packet_id), self.state))\n pass\n\n try:\n func = getattr(self, \"handle_\" + packet_id.split(\".\")[1])\n packet = func(packet=packet)\n assert len(packet) == 0\n except AttributeError:\n # print(\"Unknown packet: %s\" % packet)\n pass", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. 
and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next 
measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. 
Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + 
str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def _send_packet(self, packet: bytes):\n self._transport.sendto(packet, self._caddr)", "def put(self, pkt):\n self.packets_received += 1\n now = self.env.now\n flow_id = pkt.flow_id\n # Update of auxVC for the flow. 
We assume that vticks is the desired bit time\n # i.e., the inverse of the desired bits per second data rate.\n # Hence we then multiply this value by the size of the packet in bits.\n self.auxVCs[flow_id] = max(\n now, self.auxVCs[flow_id]) + self.vticks[flow_id] * pkt.size * 8.0\n # Lots of work to do here to implement the queueing discipline\n return self.store.put((self.auxVCs[flow_id], pkt))", "def forward(self, srcip, packet): #gets entire packet and srcip of that packet\n # get route to send packet\n best_route = self.get_route(srcip, packet[DEST]) #is a socket\n\n sock = best_route\n\n\n jsonpack = json.dumps(packet)\n sock.sendall(jsonpack.encode())\n # TODO fix src and dest\n return True", "def _add_pkt_into_tcp_stream(self, pcap_packet, num):\n \n if (pcap_packet.tcp.src_port == 80):\n server_addr = pcap_packet.ip.src\n client_addr = pcap_packet.ip.dst\n client_port = pcap_packet.tcp.dst_port\n else:\n server_addr = pcap_packet.ip.dst\n client_addr = pcap_packet.ip.src\n client_port = pcap_packet.tcp.src_port\n socket_tuple = (client_addr, client_port, server_addr, 80)\n if (socket_tuple not in self.tcp_stream_container):\n self.tcp_stream_container[socket_tuple] = Tcp_stream()\n self.tcp_stream_container[socket_tuple].pcap_num_list.append(num)", "def _handle_PacketIn (self, event):\n\n packet = event.parsed\n\n def flood (message = None):\n \"\"\" Floods the packet \"\"\"\n msg = of.ofp_packet_out()\n if time.time() - self.connection.connect_time >= _flood_delay:\n # Only flood if we've been connected for a little while...\n\n if self.hold_down_expired is False:\n # Oh yes it is!\n self.hold_down_expired = True\n #log.info(\"%s: Flood hold-down expired -- flooding\",\n # dpid_to_str(event.dpid))\n\n if message is not None: log.debug(message)\n #log.debug(\"%i: flood %s -> %s\", event.dpid,packet.src,packet.dst)\n # OFPP_FLOOD is optional; on some switches you may need to change\n # this to OFPP_ALL.\n msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))\n else:\n pass\n #log.info(\"Holding down flood for %s\", dpid_to_str(event.dpid))\n msg.data = event.ofp\n msg.in_port = event.port\n self.connection.send(msg)\n\n def drop (duration = None):\n \"\"\"\n Drops this packet and optionally installs a flow to continue\n dropping similar ones for a while\n \"\"\"\n if duration is not None:\n if not isinstance(duration, tuple):\n duration = (duration,duration)\n msg = of.ofp_flow_mod()\n msg.match = of.ofp_match.from_packet(packet)\n msg.idle_timeout = duration[0]\n msg.hard_timeout = duration[1]\n msg.buffer_id = event.ofp.buffer_id\n self.connection.send(msg)\n elif event.ofp.buffer_id is not None:\n msg = of.ofp_packet_out()\n msg.buffer_id = event.ofp.buffer_id\n msg.in_port = event.port\n self.connection.send(msg)\n\n def checkDropList():\n if packet.type == ethpkt.IP_TYPE:\n ip_packet = packet.payload\n if ip_packet.protocol == ippkt.UDP_PROTOCOL or ip_packet.protocol == ippkt.TCP_PROTOCOL:\n if self.client.src_ip == ip_packet.srcip: #2\n self.client.counter += 1\n if str(self.client.counter) in self.client.droplist: #2a\n log.debug(\"Dropping client packet: number %d\" %\n (self.client.counter))\n drop()\n return True\n else:\n return False\n elif self.server.src_ip == ip_packet.srcip: #2\n self.server.counter += 1\n if str(self.server.counter) in self.server.droplist: #2a\n log.debug(\"Dropping server packet: number %d\" %\n (self.server.counter))\n drop()\n return True\n else:\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n 
self.macToPort[packet.src] = event.port #1\n\n if (checkDropList()):\n return\n\n if not self.transparent: # 3\n if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered():\n drop() # 3a\n return\n\n if packet.dst.is_multicast:\n flood() # 4a\n else:\n if packet.dst not in self.macToPort: #5\n flood(\"Port for %s unknown -- flooding\" % (packet.dst,)) # 5a\n else:\n port = self.macToPort[packet.dst]\n if port == event.port: #6\n # 6a\n log.warning(\"Same port for packet from %s -> %s on %s.%s. Drop.\"\n % (packet.src, packet.dst, dpid_to_str(event.dpid), port))\n drop(10)\n return\n #7\n #log.debug(\"installing flow for %s.%i -> %s.%i\" %\n # (packet.src, event.port, packet.dst, port))\n msg = of.ofp_packet_out()\n msg.actions.append(of.ofp_action_output(port = port))\n msg.data = event.ofp\n self.connection.send(msg)", "def send(self, payload):\n self.emitter.input(payload)", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n raise NotImplemented", "def post_process(self, packet: 'dict[str, Any]') -> 'Schema':\n if self.flags['type'] == Enum_SeedID.IPV6_SOURCE_ADDRESS:\n self.seed = packet.get('src', NoValue)\n return self" ]
[ "0.61794573", "0.58237857", "0.57703227", "0.5730365", "0.5621077", "0.5589428", "0.5550984", "0.55054295", "0.53994757", "0.5372806", "0.53469855", "0.5338839", "0.5329753", "0.5328788", "0.5243158", "0.5215885", "0.52092856", "0.5197102", "0.51952356", "0.5168524", "0.51682425", "0.5161903", "0.5154968", "0.51525605", "0.5150464", "0.5141587", "0.5134653", "0.5097539", "0.50782233", "0.50715953" ]
0.62973535
0
Return the size of the largest packet in the flow (in either direction)
def max_packet_size(self): return max(self.fcip_doc['packet_lengths'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SendPacketsSendSize(self) -> int:", "def __payload_size(self):\n return (\n self.SIZE_LINEUP_ID + self.players_per_lineup * self.SIZE_PLAYER) * self.entries.count()", "def min_packet_size(self):\n return min(self.fcip_doc['packet_lengths'])", "def num_packets(self):\n return int(np.ceil(self.layer.numNodes / self.num_packed_elements / self.num_lmts))", "def graph_data_size_max(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_max or 0)", "def get_payload_length(packet):\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n return 188 - 4 - adaptation_field_len", "def get_max_transfer_size(ft_handle: FtHandle) -> int:\n max_size = c_uint16()\n result: Ft4222Status = _get_max_transfer_size(\n ft_handle, byref(max_size))\n\n if result != Ft4222Status.OK:\n raise Ft4222Exception(result)\n\n return max_size.value", "def data_flow_positive_node_count_max(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)", "def largest_part_size():\n return usb_part_size(largest_partition())", "def length(self):\n return struct.unpack('<H', self.pkt.payload[6:8])[0]", "def get_step_size(self):\r\n msg = struct.pack('>2B', 56, 6)\r\n response = self.query(msg)\r\n return response[1]", "def get_max_size(self):\n max_size = 0\n file = h5py.File(self.filename, 'r')\n for idx in range(len(self)):\n label = self.labels[idx]\n timestamps_group = file['/'][self.mode + '_timestamps']\n timestamps_dset = timestamps_group[label]\n size = len(timestamps_dset)\n if size > max_size: max_size = size\n file.close()\n return max_size\n\n # max_size = 0\n # for i in range(len(self)):\n # item = self[i][0]\n # if len(item) > max_size:\n # max_size = len(item)\n # return max_size", "def network_byte_length(self) -> int:", "def max_size(self):\n sizes = np.array([m.sum() for m in self.masks])\n return sizes.max()", "def get_frame_size(self):\n return self._frames.shape[-1]", "def length(self):\n return struct.unpack('<H', self.pkt.payload[2:4])[0]", "def length(self):\n return struct.unpack('<B', self.pkt.payload[1:2])[0]", "def len_max(self):\n return 16 + 16 + 8 + 8 + Tools.bin_to_dec(self.get_data_size()) + Tools.bin_to_dec(self.get_verification_size())", "def get_size(self):\n return len(self.get_payload()) + 4", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def highest_value():\n maximum_number = 0\n for i in xrange(length):\n challenger = frames[i]\n if abs(challenger) > maximum_number:\n maximum_number = abs(challenger)\n return maximum_number", "def get_tcp_packet_payload_len_with_options(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - ip.hl * 4 - 20", "def upperLayersSize(self):\n return sys.getsizeof(self.segment)" ]
[ "0.6465985", "0.6387028", "0.63805896", "0.63401634", "0.63100195", "0.6305644", "0.62747097", "0.6207945", "0.6198868", "0.61898345", "0.6187164", "0.61824095", "0.6169856", "0.6160437", "0.6136841", "0.6129285", "0.61256397", "0.6122765", "0.6118093", "0.6095568", "0.6095568", "0.6095568", "0.6095568", "0.6095568", "0.6095568", "0.60930884", "0.60930884", "0.6090581", "0.60825646", "0.60737157" ]
0.7188922
0
Return the size of the smallest packet in the flow (in either direction)
def min_packet_size(self): return min(self.fcip_doc['packet_lengths'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minimum_size(self):\n # Size in arcsec\n size = self.seeing.minimum_size()\n try:\n # Try using `intrinsic` as an object\n size = max(self.intrinsic.minimum_size(), size)\n except AttributeError:\n pass\n return size", "def graph_data_size_min(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_min or 0)", "def SendPacketsSendSize(self) -> int:", "def minimum_size(self):\n # TODO: Allow `Source` to understand when this returns None?\n return 3.", "def num_packets(self):\n return int(np.ceil(self.layer.numNodes / self.num_packed_elements / self.num_lmts))", "def data_flow_positive_node_count_min(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_min or 0)", "def __payload_size(self):\n return (\n self.SIZE_LINEUP_ID + self.players_per_lineup * self.SIZE_PLAYER) * self.entries.count()", "def minimum_size(self):\n return self._minimum_size", "def get_tcp_packet_payload_len(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - (ip.hl * 4 + ip.data.off * 4)", "def get_payload_length(packet):\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n return 188 - 4 - adaptation_field_len", "def get_max_min(block_size):\r\n return (int(block_size / 2), int((block_size - 1) / 2))", "def get_tcp_packet_payload_len_with_options(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - ip.hl * 4 - 20", "def max_packet_size(self):\n return max(self.fcip_doc['packet_lengths'])", "def length(self):\n return struct.unpack('<H', self.pkt.payload[2:4])[0]", "def length(self):\n return struct.unpack('<H', self.pkt.payload[6:8])[0]", "def minContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['smallestContig']", "def getPacketCount(self):\n return 1", "def get_step_size(self):\r\n msg = struct.pack('>2B', 56, 6)\r\n response = self.query(msg)\r\n return response[1]", "def data_flow_steps_min(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)", "def minimum_size(self):\n return self.fwhm*2.", "def get_size(self):\n return len(self.get_payload()) + 4", "def min_disk_size(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"min_disk_size\")", "def network_byte_length(self) -> int:", "def avg_packet_size(self):\n result = 0\n try:\n result = sum(self.fcip_doc['packet_lengths'])/float(len(self.fcip_doc['packet_lengths']))\n except:\n pass\n return result", "def length(self):\n return struct.unpack('<B', self.pkt.payload[1:2])[0]", "def minsize(self):# -> int:\r\n return 0", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def min_disk_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min_disk_size\")" ]
[ "0.6597096", "0.6466371", "0.64621466", "0.64613277", "0.63413686", "0.6225217", "0.62156874", "0.6203628", "0.6146065", "0.6108415", "0.6102389", "0.6100753", "0.60866076", "0.6086556", "0.60830045", "0.6077884", "0.6074226", "0.6069019", "0.60408837", "0.60278606", "0.59812826", "0.59794474", "0.5968266", "0.59409213", "0.5935508", "0.59007335", "0.58839536", "0.5877336", "0.5877336", "0.5840112" ]
0.7491888
0
Return the size of the average interpacket time interval in the flow (assessed per direction in flow).
def avg_interpacket_interval(self): avg_c2s = 0 avg_s2c = 0 count_c2s = 0 count_s2c = 0 prev_c2s_idx = 0 prev_s2c_idx = 0 for idx, direction in enumerate(self.fcip_doc['packet_directions']): if direction == 'c2s': count_c2s += 1 if count_c2s > 1: current_ts = self.fcip_doc['packet_timestamps'][idx] prev_ts = self.fcip_doc['packet_timestamps'][prev_c2s_idx] delta = current_ts - prev_ts avg_c2s += delta prev_c2s_idx = idx elif direction == 's2c': count_s2c += 1 if count_s2c > 1: current_ts = self.fcip_doc['packet_timestamps'][idx] prev_ts = self.fcip_doc['packet_timestamps'][prev_s2c_idx] delta = current_ts - prev_ts avg_s2c += delta prev_s2c_idx = idx else: #*** Don't know direction so ignore: pass #*** Return the largest interpacket delay overall: result = 0 try: result = (avg_c2s+avg_s2c)/float(count_s2c+count_c2s-2) except: pass return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avg_packet_size(self):\n result = 0\n try:\n result = sum(self.fcip_doc['packet_lengths'])/float(len(self.fcip_doc['packet_lengths']))\n except:\n pass\n return result", "def get_naive_size(self) -> int:\n return (self.triples.time_end - self.triples.time_begin + 1).sum()", "def graph_data_size_avg(self) -> float:\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)", "def avg_len(steps):\n lens = []\n for i in steps:\n lens.append(len(i))\n avg = sum(lens) / float(len(lens))\n return avg", "def _interFrameLen(self):\n return np.ceil((self.interFrameDuration * self.sampleRate) / self.downsample) * self.downsample", "def size(self, time):\n if self.start_time <= time <= self.end_time:\n return self.masks[time - self.start_time].sum()\n else:\n return 0", "def mean_num_pkt_in_system(self):\n num_pkt_duration = {}\n entire_duration = 0.0\n product_sum = 0.0\n\n start = int(len(self.system.log_time) / 2.0) - 1\n end = int(len(self.system.log_time) / 3.0 * 2) - 1\n for i in range(start, end):\n dur = self.system.log_time[i+1] - self.system.log_time[i]\n num_pkt = self.system.log_num_pkt_inside[i]\n if num_pkt in num_pkt_duration.keys():\n num_pkt_duration[num_pkt] += dur\n else:\n num_pkt_duration[num_pkt] = dur\n entire_duration += dur\n\n for num_pkt, dur in num_pkt_duration.items():\n product_sum += num_pkt * dur\n return product_sum / entire_duration", "def get_step_size(self):\r\n msg = struct.pack('>2B', 56, 6)\r\n response = self.query(msg)\r\n return response[1]", "def meshsize_avg(self):\n nspans = self.numspans\n support = abs(self.kv[-1] - self.kv[0])\n return support / nspans", "def calc_stepsize(nd, d, p_i, delta_t):\n #return 1.0/((nd+d)*np.max(np.sum(p_i, axis=0)))\n return delta_t/(np.sum(np.sqrt(np.sum(np.square(p_i), axis=1))))", "def chunk_size(self):\r\n return int(self.frame_length * self.sample_rate)", "def compute_size(self):\n length = np.max(np.max(self.positions, axis=1) -\n np.min(self.positions, axis=1))\n return length + 2*self.get_radii().max()", "def cal_length (datalist):\n sortedlist = sorted(datalist, key = lambda x : len(x[1]))\n maxle = len(sortedlist[-1][1])\n minle = len(sortedlist[0][1])\n average = sum([len(x[2]) for x in sortedlist])/len(sortedlist)\n return minle, maxle, average", "def meanContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['meanContig']", "def get_stepsize(ds):\n tube_pos = ds.axes[-1]\n if tube_pos.ndim == 2: #very old data, just take one slice\n tube_pos = tube_pos[0]\n tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)\n tube_steps = ds.axes[0]\n bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)\n pixel_step = int(round(tubesep/bin_size))\n bin_size = tubesep/pixel_step\n #print 'Determined tube separation to be %f, corresponding to %d steps' % (tubesep,pixel_step)\n return bin_size", "def get_size(self):\n cum_size = 0\n for stream in self.__streams.values():\n cum_size += sys.getsizeof(stream)\n for trace in stream:\n cum_size += sys.getsizeof(trace)\n cum_size += sys.getsizeof(trace.stats)\n cum_size += sys.getsizeof(trace.stats.__dict__)\n cum_size += sys.getsizeof(trace.data)\n cum_size += trace.data.nbytes\n # Add one percent buffer just in case.\n return cum_size * 1.01", "def avg_par_length(s):\n pars = s.split('\\n')\n return len(s) / float(len(pars))", "def _determine_step(self):\r\n\r\n step_sizes = np.empty(len(self.h))\r\n\r\n for i in range(step_sizes.shape[0]):\r\n step_sizes[i] = np.mean(np.diff(self.h[i], n=1))\r\n\r\n return np.mean(step_sizes)", "def 
_get_total_read_size(self):\n if self.read_size:\n read_size = EVENT_SIZE * self.read_size\n else:\n read_size = EVENT_SIZE\n return read_size", "def time_bin_width(self):\n return (self.header.time_gate_stop - self.header.time_gate_start) / \\\n self.num_time_bins()", "def get_avg_sentence_length(self):\n sentences = self.blob.sentences\n average_sentence_length = np.mean(np.array([len(sentence.words) for sentence in sentences]))\n return average_sentence_length", "def calc_stepsize(self):\n # Calculate step size\n step = 1.0/((self.n+self.d)*np.max(np.sum(self.p, axis=0)))\n return step", "def determine_number_of_packets(self):\n self.Ltot = 4. * np.pi * np.sum(self.eta * self.dV)\n self.L = self.Ltot / float(self.Npackets)\n\n self.npackets_cell = (4. * np.pi * self.eta * self.dV /\n self.L).astype(np.int)\n self.npackets_cell_cum_frac = (\n np.cumsum(self.npackets_cell).astype(np.float) /\n np.sum(self.npackets_cell))", "def num_packets(self):\n return int(np.ceil(self.layer.numNodes / self.num_packed_elements / self.num_lmts))", "def get_payload_length(packet):\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n return 188 - 4 - adaptation_field_len", "def avg_extend_time(self):\r\n if self.total_extended:\r\n return self.total_extend_time/self.total_extended\r\n else: return 0", "def num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M", "def SendPacketsSendSize(self) -> int:", "def intervalLen(self):\n return self.end-self.start+1", "def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size" ]
[ "0.760965", "0.6611826", "0.6488399", "0.6448079", "0.6321379", "0.63145524", "0.62541467", "0.6233074", "0.62164295", "0.62073046", "0.6184405", "0.6179181", "0.6152177", "0.6146496", "0.6124637", "0.61187994", "0.6073599", "0.60345674", "0.6025252", "0.6019595", "0.6007842", "0.60066247", "0.60052794", "0.5998265", "0.59874606", "0.59573007", "0.5950054", "0.5949446", "0.5922815", "0.5907639" ]
0.7346629
1
Return the size of the largest interpacket time interval in the flow (assessed per direction in flow).
def max_interpacket_interval(self): max_c2s = 0 max_s2c = 0 count_c2s = 0 count_s2c = 0 prev_c2s_idx = 0 prev_s2c_idx = 0 for idx, direction in enumerate(self.fcip_doc['packet_directions']): if direction == 'c2s': count_c2s += 1 if count_c2s > 1: current_ts = self.fcip_doc['packet_timestamps'][idx] prev_ts = self.fcip_doc['packet_timestamps'][prev_c2s_idx] delta = current_ts - prev_ts if delta > max_c2s: max_c2s = delta prev_c2s_idx = idx elif direction == 's2c': count_s2c += 1 if count_s2c > 1: current_ts = self.fcip_doc['packet_timestamps'][idx] prev_ts = self.fcip_doc['packet_timestamps'][prev_s2c_idx] delta = current_ts - prev_ts if delta > max_s2c: max_s2c = delta prev_s2c_idx = idx else: #*** Don't know direction so ignore: pass #*** Return the largest interpacket delay overall: if max_c2s > max_s2c: return max_c2s else: return max_s2c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maxContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['largestContig']", "def maxTurbulenceSize(self, arr: List[int]) -> int:\n if len(arr) == 1:\n return 1\n ret = 1\n tmp_ret = 0\n last_flag = None\n for i in range(1, len(arr)):\n if arr[i] == arr[i - 1]:\n current_flag = None\n else:\n current_flag = arr[i] > arr[i - 1]\n\n if current_flag is None:\n ret = max(ret, tmp_ret)\n tmp_ret = 1\n elif last_flag is None or last_flag == current_flag:\n ret = max(ret, tmp_ret)\n tmp_ret = 2\n else:\n tmp_ret += 1\n\n last_flag = current_flag\n return max(ret, tmp_ret)", "def max_packet_size(self):\n return max(self.fcip_doc['packet_lengths'])", "def get_payload_length(packet):\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n return 188 - 4 - adaptation_field_len", "def get_naive_size(self) -> int:\n return (self.triples.time_end - self.triples.time_begin + 1).sum()", "def get_max_size(self):\n max_size = 0\n file = h5py.File(self.filename, 'r')\n for idx in range(len(self)):\n label = self.labels[idx]\n timestamps_group = file['/'][self.mode + '_timestamps']\n timestamps_dset = timestamps_group[label]\n size = len(timestamps_dset)\n if size > max_size: max_size = size\n file.close()\n return max_size\n\n # max_size = 0\n # for i in range(len(self)):\n # item = self[i][0]\n # if len(item) > max_size:\n # max_size = len(item)\n # return max_size", "def maximum_element_size_for_length(length):\n\t\n\treturn (2**(7*length)) - 2", "def get_max_min(block_size):\r\n return (int(block_size / 2), int((block_size - 1) / 2))", "def compute_size(self):\n length = np.max(np.max(self.positions, axis=1) -\n np.min(self.positions, axis=1))\n return length + 2*self.get_radii().max()", "def largest_cc_size(ugraph):\n\tconnected = cc_visited(ugraph)\n\tmaxnum = 0\n\tfor content in connected:\n\t\tmaxnum = max(maxnum,len(content))\n\treturn maxnum", "def _interFrameLen(self):\n return np.ceil((self.interFrameDuration * self.sampleRate) / self.downsample) * self.downsample", "def graph_data_size_max(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_max or 0)", "def highest_value():\n maximum_number = 0\n for i in xrange(length):\n challenger = frames[i]\n if abs(challenger) > maximum_number:\n maximum_number = abs(challenger)\n return maximum_number", "def get_step_size(self):\r\n msg = struct.pack('>2B', 56, 6)\r\n response = self.query(msg)\r\n return response[1]", "def length(a):\n return max(a.shape)", "def size(self, time):\n if self.start_time <= time <= self.end_time:\n return self.masks[time - self.start_time].sum()\n else:\n return 0", "def computeMaxTime(ham: Dict[str, Any]) -> Tuple[float, float]:\n # Find the longest time\n maxNs = 0\n for key in ham[\"control\"]:\n ctrls = ham[\"control\"][key]\n for waveform in ctrls[\"waveforms\"]:\n finalNs = waveform[\"insert_ns\"] + waveform[\"duration_ns\"]\n if maxNs < finalNs:\n maxNs = finalNs\n maxDt = floor(maxNs / ham[\"circuit\"][\"dt\"])\n\n ham[\"circuit\"][\"max_time_dt\"] = maxDt\n ham[\"circuit\"][\"max_time_ns\"] = maxNs\n\n return maxNs, maxDt", "def get_length(self) -> int:\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()", "def largest_cc_size(ugraph):\n if not ugraph:\n return 0\n return max(len(cc) for cc in cc_visited(ugraph))", "def _getLongestLength(self, listOfLists):\n\t\tmax = -1\n\t\tfor list in listOfLists:\n\t\t\tif len(list) > max:\n\t\t\t\tmax = len(list)\n\t\treturn max", "def get_typical_size(workers: List[List[int]]) -> int:\n size = 
0\n for worker in workers:\n size = max([size,\n np.abs(worker[2]-worker[0]),\n np.abs(worker[3]-worker[1])])\n \n return size", "def get_frame_size(self):\n return self._frames.shape[-1]", "def find_windowsize(data):\n time = [i[0] for i in data]\n voltage = [i[1] for i in data]\n\n if len(time) != len(voltage):\n total_index_data = len(voltage)\n else:\n total_index_data = min(len(time), len(voltage))\n\n windowsize = round(total_index_data / 6)\n\n return windowsize", "def largest_cc_size(ugraph):\r\n\ttotal_list = cc_visited(ugraph)\r\n\tmax_length_list = []\r\n\tfor each_list in total_list:\r\n\t\tif len(max_length_list) < len(each_list):\r\n\t\t\tmax_length_list = each_list\r\n\treturn len(max_length_list)", "def max_size(self):\n sizes = np.array([m.sum() for m in self.masks])\n return sizes.max()", "def largest_cc_size(ugraph):\n ccomp = cc_visited(ugraph)\n if len(ccomp) == 0:\n return 0\n \n return max([len(s) for s in ccomp])", "def num_packets(self):\n return int(np.ceil(self.layer.numNodes / self.num_packed_elements / self.num_lmts))", "def get_time_chunk_size(ts_ds: Optional[xr.Dataset],\n var_name: str,\n ds_id: str) -> Optional[int]:\n if ts_ds is not None:\n ts_var: Optional[xr.DataArray] = ts_ds.get(var_name)\n if ts_var is not None:\n chunks = ts_var.chunks\n if chunks is None:\n LOG.warning(f'variable {var_name!r}'\n f' in time-chunked dataset {ds_id!r}'\n f' is not chunked')\n return None\n try:\n time_index = ts_var.dims.index('time')\n time_chunks = chunks[time_index]\n except ValueError:\n time_chunks = None\n if not time_chunks:\n LOG.warning(f'no chunks found'\n f' for dimension \\'time\\''\n f' of variable {var_name!r}'\n f' in time-chunked dataset {ds_id!r}')\n return None\n if len(time_chunks) == 1:\n return time_chunks[0]\n return max(*time_chunks)\n else:\n LOG.warning(f'variable {var_name!r} not'\n f' found in time-chunked dataset {ds_id!r}')\n return None", "def _get_max_t(self):\n\n return max([\n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])", "def getLargestPatternLength(self):\n return self._patternLimit" ]
[ "0.6667583", "0.66518646", "0.6628894", "0.64285105", "0.6397434", "0.6344896", "0.6285462", "0.62777865", "0.6245625", "0.6201934", "0.6187214", "0.6186099", "0.61607647", "0.6120331", "0.60703766", "0.60529447", "0.6047552", "0.60439914", "0.6037402", "0.60335124", "0.6013759", "0.6009632", "0.600914", "0.60015714", "0.5965846", "0.5949872", "0.5949536", "0.5929916", "0.59248084", "0.59151137" ]
0.71880835
0
Return the size of the smallest interpacket time interval in the flow (assessed per direction in flow).
def min_interpacket_interval(self): min_c2s = 0 min_s2c = 0 count_c2s = 0 count_s2c = 0 prev_c2s_idx = 0 prev_s2c_idx = 0 for idx, direction in enumerate(self.fcip_doc['packet_directions']): if direction == 'c2s': count_c2s += 1 if count_c2s > 1: current_ts = self.fcip_doc['packet_timestamps'][idx] prev_ts = self.fcip_doc['packet_timestamps'][prev_c2s_idx] delta = current_ts - prev_ts if not min_c2s or delta < min_c2s: min_c2s = delta prev_c2s_idx = idx elif direction == 's2c': count_s2c += 1 if count_s2c > 1: current_ts = self.fcip_doc['packet_timestamps'][idx] prev_ts = self.fcip_doc['packet_timestamps'][prev_s2c_idx] delta = current_ts - prev_ts if not min_s2c or delta < min_s2c: min_s2c = delta prev_s2c_idx = idx else: #*** Don't know direction so ignore: pass #*** Return the smallest interpacket delay overall, watch out for #*** where we didn't get a calculation (don't return 0 unless both 0): if not min_s2c: #*** min_s2c not set so return min_c2s as it might be: return min_c2s elif 0 < min_c2s < min_s2c: return min_c2s else: return min_s2c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_packet_size(self):\n return min(self.fcip_doc['packet_lengths'])", "def minContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['smallestContig']", "def get_naive_size(self) -> int:\n return (self.triples.time_end - self.triples.time_begin + 1).sum()", "def max_interpacket_interval(self):\n max_c2s = 0\n max_s2c = 0\n count_c2s = 0\n count_s2c = 0\n prev_c2s_idx = 0\n prev_s2c_idx = 0\n for idx, direction in enumerate(self.fcip_doc['packet_directions']):\n if direction == 'c2s':\n count_c2s += 1\n if count_c2s > 1:\n current_ts = self.fcip_doc['packet_timestamps'][idx]\n prev_ts = self.fcip_doc['packet_timestamps'][prev_c2s_idx]\n delta = current_ts - prev_ts\n if delta > max_c2s:\n max_c2s = delta\n prev_c2s_idx = idx\n elif direction == 's2c':\n count_s2c += 1\n if count_s2c > 1:\n current_ts = self.fcip_doc['packet_timestamps'][idx]\n prev_ts = self.fcip_doc['packet_timestamps'][prev_s2c_idx]\n delta = current_ts - prev_ts\n if delta > max_s2c:\n max_s2c = delta\n prev_s2c_idx = idx\n else:\n #*** Don't know direction so ignore:\n pass\n #*** Return the largest interpacket delay overall:\n if max_c2s > max_s2c:\n return max_c2s\n else:\n return max_s2c", "def get_payload_length(packet):\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n return 188 - 4 - adaptation_field_len", "def num_packets(self):\n return int(np.ceil(self.layer.numNodes / self.num_packed_elements / self.num_lmts))", "def get_max_min(block_size):\r\n return (int(block_size / 2), int((block_size - 1) / 2))", "def graph_data_size_min(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_min or 0)", "def _interFrameLen(self):\n return np.ceil((self.interFrameDuration * self.sampleRate) / self.downsample) * self.downsample", "def get_step_size(self):\r\n msg = struct.pack('>2B', 56, 6)\r\n response = self.query(msg)\r\n return response[1]", "def size(self, time):\n if self.start_time <= time <= self.end_time:\n return self.masks[time - self.start_time].sum()\n else:\n return 0", "def calc_size_of_queue(ats, dts):\n\n N = len(ats)\n assert len(dts) == N\n\n ats_inf = np.append(ats, float('inf'))\n dts_inf = np.append(dts, float('inf'))\n\n times = [0.0]\n sizes = [0]\n\n i = 0\n j = 0\n\n while i < N or j < N:\n\n # new arrival\n if ats_inf[i] < dts_inf[j]:\n times.append(ats[i])\n sizes.append(sizes[-1] + 1)\n i += 1\n\n # new departure\n elif ats_inf[i] > dts_inf[j]:\n times.append(dts[j])\n sizes.append(sizes[-1] - 1)\n j += 1\n\n # simultaneous arrival and departure\n else:\n i += 1\n j += 1\n\n assert np.all(np.array(sizes) >= 0)\n\n return times, sizes", "def calculate_previous_size(required_hole_size):\n\treturn required_hole_size/8", "def minimum_size(self):\n # Size in arcsec\n size = self.seeing.minimum_size()\n try:\n # Try using `intrinsic` as an object\n size = max(self.intrinsic.minimum_size(), size)\n except AttributeError:\n pass\n return size", "def avg_interpacket_interval(self):\n \n avg_c2s = 0\n avg_s2c = 0\n count_c2s = 0\n count_s2c = 0\n prev_c2s_idx = 0\n prev_s2c_idx = 0\n for idx, direction in enumerate(self.fcip_doc['packet_directions']):\n if direction == 'c2s':\n count_c2s += 1\n if count_c2s > 1:\n current_ts = self.fcip_doc['packet_timestamps'][idx]\n prev_ts = self.fcip_doc['packet_timestamps'][prev_c2s_idx]\n delta = current_ts - prev_ts\n avg_c2s += delta\n prev_c2s_idx = idx\n elif direction == 's2c':\n count_s2c += 1\n if count_s2c > 1:\n current_ts = self.fcip_doc['packet_timestamps'][idx]\n prev_ts = 
self.fcip_doc['packet_timestamps'][prev_s2c_idx]\n delta = current_ts - prev_ts\n avg_s2c += delta\n prev_s2c_idx = idx\n else:\n #*** Don't know direction so ignore:\n pass\n #*** Return the largest interpacket delay overall:\n result = 0\n try:\n result = (avg_c2s+avg_s2c)/float(count_s2c+count_c2s-2)\n except:\n pass\n return result", "def SendPacketsSendSize(self) -> int:", "def get_length(self) -> int:\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()", "def width(self):\n return abs(self.end[0] - self.start[0])", "def determine_number_of_packets(self):\n self.Ltot = 4. * np.pi * np.sum(self.eta * self.dV)\n self.L = self.Ltot / float(self.Npackets)\n\n self.npackets_cell = (4. * np.pi * self.eta * self.dV /\n self.L).astype(np.int)\n self.npackets_cell_cum_frac = (\n np.cumsum(self.npackets_cell).astype(np.float) /\n np.sum(self.npackets_cell))", "def min_width(blocks):\r\n assert(len(blocks) > 0)\r\n return sum(blocks) + len(blocks) - 1", "def find_windowsize(data):\n time = [i[0] for i in data]\n voltage = [i[1] for i in data]\n\n if len(time) != len(voltage):\n total_index_data = len(voltage)\n else:\n total_index_data = min(len(time), len(voltage))\n\n windowsize = round(total_index_data / 6)\n\n return windowsize", "def compute_size(self):\n length = np.max(np.max(self.positions, axis=1) -\n np.min(self.positions, axis=1))\n return length + 2*self.get_radii().max()", "def get_tcp_packet_payload_len(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - (ip.hl * 4 + ip.data.off * 4)", "def get_tcp_packet_payload_len_with_options(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - ip.hl * 4 - 20", "def data_flow_steps_min(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)", "def intervalLen(self):\n return self.end-self.start+1", "def calc_stepsize(self):\n # Calculate step size\n step = 1.0/((self.n+self.d)*np.max(np.sum(self.p, axis=0)))\n return step", "def minsize(self):# -> int:\r\n return 0", "def determine_window_size(rectified_signal):\n logging.debug('running determine_window_size function')\n first_peak = first_peak_detect(rectified_signal, 1)\n second_peak = first_peak_detect(rectified_signal, first_peak + 1)\n return (second_peak - first_peak) * WINDOW_MULTIPLIER", "def maxTurbulenceSize(self, arr: List[int]) -> int:\n if len(arr) == 1:\n return 1\n ret = 1\n tmp_ret = 0\n last_flag = None\n for i in range(1, len(arr)):\n if arr[i] == arr[i - 1]:\n current_flag = None\n else:\n current_flag = arr[i] > arr[i - 1]\n\n if current_flag is None:\n ret = max(ret, tmp_ret)\n tmp_ret = 1\n elif last_flag is None or last_flag == current_flag:\n ret = max(ret, tmp_ret)\n tmp_ret = 2\n else:\n tmp_ret += 1\n\n last_flag = current_flag\n return max(ret, tmp_ret)" ]
[ "0.66252464", "0.64096874", "0.6360574", "0.63360894", "0.62790656", "0.6177121", "0.6166679", "0.6132873", "0.6110259", "0.6066307", "0.6057291", "0.6012215", "0.598935", "0.5977526", "0.5948044", "0.59432024", "0.5932077", "0.58715415", "0.5861192", "0.58579046", "0.58342046", "0.57983154", "0.5782213", "0.57608855", "0.5752252", "0.5751455", "0.5727445", "0.57121044", "0.57084566", "0.56893265" ]
0.70118415
0
Set the suppressed attribute in the flow database object to the current packet count so that future suppressions of the same flow can be backed off to prevent overwhelming the controller
def set_suppress_flow(self): self.suppressed = self.packet_count self.fcip.update_one({'hash': self.fcip_hash}, {'$set': {'suppressed': self.suppressed},})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_fe_tcf_suppress(self, suppress_dict):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n #*** Check it's TCP:\n if suppress_dict['proto'] != 'tcp':\n self.logger.error(\"Unsupported proto=%s\", suppress_dict['proto'])\n return 0\n\n #*** Convert IP addresses strings to integers:\n ipv4_src = _ipv4_t2i(str(suppress_dict['ip_A']))\n ipv4_dst = _ipv4_t2i(str(suppress_dict['ip_B']))\n\n #*** Build match:\n match = parser.OFPMatch(eth_type=0x0800,\n ipv4_src=ipv4_src,\n ipv4_dst=ipv4_dst,\n ip_proto=6,\n tcp_src=suppress_dict['tp_A'],\n tcp_dst=suppress_dict['tp_B']\n )\n actions = []\n inst = [parser.OFPInstructionActions(\n ofproto.OFPIT_APPLY_ACTIONS, actions),\n parser.OFPInstructionGotoTable(self.ft_tt)]\n #*** Needs higher priority than TC rules in same table:\n priority = 2\n mod = parser.OFPFlowMod(datapath=self.datapath, table_id=self.ft_tcf,\n priority=priority,\n idle_timeout=self.suppress_idle_timeout,\n match=match, instructions=inst)\n self.logger.debug(\"Installing suppress forward FE dpid=%s\", self.dpid)\n self.datapath.send_msg(mod)\n #*** Build counter match (reversed flow):\n match = parser.OFPMatch(eth_type=0x0800,\n ipv4_src=ipv4_dst,\n ipv4_dst=ipv4_src,\n ip_proto=6,\n tcp_src=suppress_dict['tp_B'],\n tcp_dst=suppress_dict['tp_A']\n )\n mod = parser.OFPFlowMod(datapath=self.datapath, table_id=self.ft_tcf,\n priority=priority,\n idle_timeout=self.suppress_idle_timeout,\n match=match, instructions=inst)\n self.logger.debug(\"Installing suppress reverse FE dpid=%s\", self.dpid)\n self.datapath.send_msg(mod)", "def lost_packet_count(self, lost_packet_count):\n self._lost_packet_count = lost_packet_count", "def debug_record_count(self, value: int):\n self._debug_record_count = value", "def log_suppression(self, timestamp):\n self.repeats += 1\n if timestamp > self.timestamp:\n self.timestamp = timestamp\n self.save()", "def suppress(self):\n pass", "def track(self):\n scapy.all.sniff(prn = self.add)", "def _reset_count(self):\n self._triple_count = 0\n self._error_count = 0\n self._ignored_count = 0", "def lost_packet_count(self):\n return self._lost_packet_count", "def CapturedPacketCount(self):\n if self.force_auto_sync:\n self.get('CapturedPacketCount')\n return self._CapturedPacketCount", "def snmpqosqos_packets_droppedrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_packets_droppedrate\n\t\texcept Exception as e:\n\t\t\traise e", "def reset_count(self):\n self.count = 0", "def suppress(self):\n return Suppress(self)", "def purge(self):\n self.remaining = 0", "def reset(self):\n super().reset()\n self.sample_count = 1\n self.miss_prob = 1.0\n self.miss_std = 0.0\n self.miss_prob_sd_min = float(\"inf\")\n self.miss_prob_min = float(\"inf\")\n self.miss_sd_min = float(\"inf\")", "def ignore_clicks(self):\n self._ignore_count += 1", "def snmpqosqos_packets_sentrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_packets_sentrate\n\t\texcept Exception as e:\n\t\t\traise e", "def skip_layer(self, count=1):\n self._layer_counter += count", "def _get_discard_packet(self):\n return self.__discard_packet", "def set_number_served(self, no_served):\n self.numbers_served = no_served", "def direct_count(self, direct_count):\n\n self._direct_count = direct_count", "def snmpqosqos_packets_bypassedrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_packets_bypassedrate\n\t\texcept Exception as e:\n\t\t\traise e", "def test_enable_retainUnsent_logs():\n stmt = 
sqlalchemy.select([_LOGGING_TABLE.c.total_unsent_rows_removed]).select_from(_LOGGING_TABLE).order_by(\n _LOGGING_TABLE.c.id.desc()).limit(1)\n config_info = read_config()\n config_info['retainUnsent'] = True\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n\n time.sleep(convert_sleep(config_info['wait'])*2)\n result = execute_command_with_return_value(stmt)\n\n assert int(result[0][0]) == 0", "def update_link_statistics(self):\n for link in self.links.values():\n link.update_link_statistics()\n if self.track:\n key = self.id + \":\" + globals.PACKETLOSS\n globals.statistics[key][globals.systime] = self.droppedpackets\n self.droppedpackets = 0", "def setPacketLength(self):\n self.packetLength = len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def flowcnt_trap(ctx):\n ctx.obj = ConfigDBConnector()\n ctx.obj.connect()", "def test_disable_retainUnsent_logs():\n stmt = sqlalchemy.select([_LOGGING_TABLE.c.total_unsent_rows_removed]).select_from(_LOGGING_TABLE).order_by(\n _LOGGING_TABLE.c.id.desc()).limit(1)\n config_info = read_config()\n config_info['retainUnsent'] = False\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n\n time.sleep(convert_sleep(config_info['wait'])*2)\n result = execute_command_with_return_value(stmt)\n\n assert int(result[0][0]) >= 0", "def set_number_served(self, amount):\n self.number_served = amount", "def getPacketCount(self):\n return 1", "def resetWriteCount(self):\n self.writeCount = 0", "def sn_size(self, val):\n if isinstance(val, int) and val >= 1:\n if val != self._faux._sn_size:\n self._faux._sn_size = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")" ]
[ "0.5602831", "0.54044235", "0.52294934", "0.5105551", "0.50889367", "0.5087022", "0.50683844", "0.50344235", "0.50178295", "0.50135696", "0.500748", "0.4920166", "0.48919708", "0.48824057", "0.48358688", "0.48272425", "0.48197994", "0.47963083", "0.47859436", "0.47705513", "0.4761368", "0.4760181", "0.47601783", "0.4758894", "0.47557706", "0.4727454", "0.47251", "0.47131675", "0.46631867", "0.4658324" ]
0.7687831
0
Does the current packet have the TCP FIN flag set?
def tcp_fin(self): return self.tcp_flags & dpkt.tcp.TH_FIN != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def EndOfPacket(self) -> bool:", "def eof_received(self):\n self.connection_lost('EOF')\n return False", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def fin_sent():\n test_str = \"f1N s3nt\\n\"\n server = start_server()\n client = start_client()\n\n # First write some data.\n write_to(client, test_str)\n if not read_segments_from(client):\n return False\n time.sleep(1)\n\n # Write an EOF character.\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check to see that segment sent from client is a FIN.\n segments = read_segments_from(client)\n if not segments:\n return False\n return \"FIN\" in segments[0].flags", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def eof(self):\n try:\n next_line = self.read_pkt_line()\n except HangupException:\n return True\n self.unread_pkt_line(next_line)\n return False", "def send_after_fin():\n test_str = make_random(100)\n test_str_fin = \"s3nd 4ft3r f1N\\n\"\n server = start_server()\n client = start_client()\n\n # Write an EOF character to client so it sends a FIN.\n write_to(server, test_str)\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check that a FIN was received.\n time.sleep(1)\n segments = read_segments_from(server)\n if not segments:\n return False\n if not \"FIN\" in [flag for segment in segments for flag in segment.flags]:\n return False\n\n # Write to server STDIN. It should continue sending data to the client.\n write_to(server, test_str_fin)\n return len(read_segments_from(server)) > 0", "def _is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def is_final(self):\n return (\n self.status == self.STATUS_DISCONNECT\n or self.status == self.STATUS_DONE\n or self.status == self.STATUS_PARTNER_DISCONNECT\n or self.status == self.STATUS_PARTNER_DISCONNECT_EARLY\n or self.status == self.STATUS_RETURNED\n or self.status == self.STATUS_EXPIRED\n )", "def socket_status(self, sock_num: int) -> bool:\n if not self._send_parse_reply(b\"AT+CIPCLOSE?\", b\"+CIPCLOSE:\", idx=sock_num):\n return False\n if not self._buf == 1:\n return False\n return True", "def eof(self):\n\t\treturn not self.is_alive() and self._queue.empty()", "def recv_after_eof():\n test_str = make_random(100)\n test_str_fin = \"r3c31v3 4ft3r f1N\\n\"\n server = start_server()\n client = start_client()\n\n # Write an EOF character to client so it sends a FIN.\n write_to(server, test_str)\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check that a FIN was sent.\n time.sleep(1)\n segments = read_segments_from(client)\n if not segments:\n return False\n if not \"FIN\" in [flag for segment in segments for flag in segment.flags]:\n return False\n\n # Write to server STDIN. 
The client should receive and output the data.\n write_to(server, test_str_fin)\n return test_str_fin in read_from(client)", "def eof(self):\n return not self.is_alive() and self._queue.empty()", "def connection_closed(self):\n return self.conn_status == self.CONN_CLOSED", "def closed(self):\n return self._stream is None", "def at_eof(self):\n return self._eof and not self._buffer", "def at_eof(self):\n return self.tell() == len(self)", "def eof(self):\r\n\t\treturn self.index == len(self.data)", "def check_finish(self):\r\n return not self.proc.is_alive()", "def is_done(self):\n return True if self.t >= self.max_ep_len else False", "def eof_received(self):\n logger.debug(\"EOF from client, closing.\")\n self.connection_lost(None)", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def close_connection(self) -> bool:\n return self.get_header('Connection') != 'keep-alive'", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def finished(self):\n return self._state == FINISHED_STATE", "def isClosed(self) -> bool:\n return self._connection is None" ]
[ "0.7094454", "0.7082811", "0.690565", "0.67987853", "0.67850024", "0.67468345", "0.67016333", "0.65382755", "0.64064264", "0.62315774", "0.62288594", "0.61441845", "0.60956466", "0.60806847", "0.6036773", "0.59935206", "0.5980416", "0.59537274", "0.59387845", "0.5922927", "0.59051675", "0.58798194", "0.58587706", "0.5849931", "0.58305657", "0.5829465", "0.5828453", "0.58018416", "0.57933724", "0.57747656" ]
0.8585132
0
Does the current packet have the TCP SYN flag set?
def tcp_syn(self): return self.tcp_flags & dpkt.tcp.TH_SYN != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def tcp_cwr(self):\n return self.tcp_flags & dpkt.tcp.TH_CWR != 0", "def is_icmp(self) -> bool:\n return self.proto == ICMP", "def is_valid_ssdp_packet(data: bytes) -> bool:\n return (\n bool(data)\n and b\"\\n\" in data\n and (\n data.startswith(b\"NOTIFY * HTTP/1.1\")\n or data.startswith(b\"M-SEARCH * HTTP/1.1\")\n or data.startswith(b\"HTTP/1.1 200 OK\")\n )\n )", "def is_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() == get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def is_open(self):\n return self._socket is not None", "def add_packet(self, packet: Dict[str, Any]) -> bool:\n if (\n packet['tcp_header']['source_port'] == self.source_port and\n packet['tcp_header']['destination_port'] == self.destination_port and\n packet['ip_header']['source_address'] == self.source_address and\n packet['ip_header']['destination_address'] == self.destination_address\n ):\n self.packets.append((TCPStream.INBOUND, packet))\n return True\n\n if (\n packet['tcp_header']['source_port'] == self.destination_port and\n packet['tcp_header']['destination_port'] == self.source_port and\n packet['ip_header']['source_address'] == self.destination_address and\n packet['ip_header']['destination_address'] == self.source_address\n\n ):\n self.packets.append((TCPStream.OUTBOUND, packet))\n return True\n\n return False", "def is_connected(self):\n if self._socket:\n return True\n else:\n return False", "def is_connected(self):\r\n return self.__socket is not None", "def is_connected(self):\n return self._current_protocol is not None", "def getIsConnected(self):\n if self._socket == None:\n return False\n\n # Assume we are still connected. 
TODO: Do a test receive?\n return True", "def negotiation_should_advance(self):\n # Generally, this separates a bare TCP connect() from a True\n # RFC-compliant telnet client with responding IAC interpreter.\n server_do = sum(enabled for _, enabled in self.writer.remote_option.items())\n client_will = sum(enabled for _, enabled in self.writer.local_option.items())\n return bool(server_do or client_will)", "def is_connected(self):\n return self._socket is not None", "def check_socket(self):\n return self.__send_command(cmd=\"PING\")", "def isConnected(self):\n return self.transport is not None and self.started", "def __CheckConnectStatus(self):\r\n if not self.tn:\r\n print \"Connection is down!\"\r\n return False\r\n else:\r\n print \"Connection is alive!\"\r\n return True", "def canSend(self):\n return self._lte.isconnected()", "def is_secure(self):\n return self._is_ssl or self._is_socket", "def xforwardedforclientsrcportenabled(self) -> bool:\n return pulumi.get(self, \"xforwardedforclientsrcportenabled\")", "def master_filter(self, pkt):\n return (IP in pkt and pkt[IP].src == self.receiver and GBN in pkt\n and ICMP not in pkt)", "def is_not_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() != get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def can_failover(self):\n return self._can_failover" ]
[ "0.7690888", "0.7561976", "0.7231197", "0.6869579", "0.66572094", "0.66416705", "0.65288955", "0.6464404", "0.64455825", "0.62220824", "0.6173735", "0.6033871", "0.5989733", "0.58425915", "0.5829437", "0.57948446", "0.57896084", "0.57755846", "0.576857", "0.57579166", "0.57275546", "0.5669701", "0.5657401", "0.5652493", "0.5652068", "0.56305766", "0.56264687", "0.56047", "0.55995977", "0.55876034" ]
0.81022835
0
Does the current packet have the TCP RST flag set?
def tcp_rst(self): return self.tcp_flags & dpkt.tcp.TH_RST != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def _is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def _check_packet(self, packet):\n src, dst = self._parse_packet_src_dst(packet)\n tcp = get_ip_packet(packet.load).data\n if tcp.flags & dpkt.tcp.TH_RST:\n if (src, dst) in self._last_tcp_seq:\n del self._last_tcp_seq[(src, dst)]\n else:\n if not tcp.data: raise BadPacket(\"no payload\")\n if (src, dst) in self._last_tcp_seq:\n last_seq = self._last_tcp_seq[(src, dst)]\n if tcp.seq <= last_seq:\n # this exception eliminates dups\n raise BadPacket(\"This sequence(%d<=%d) seen before\" % (tcp.seq, last_seq))\n self._last_tcp_seq[(src, dst)] = tcp.seq", "def would_retransmit(self):\n return not self.my_pending_requests.is_empty()", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def tcp_cwr(self):\n return self.tcp_flags & dpkt.tcp.TH_CWR != 0", "def rst(self, qpkt):\n # ACK/RST\n qpkt[TCP].flags = 0x16\n self.remove_computed_fields(qpkt)", "def get_is_reset(self, timeout = 0):\n response = self.send_command_to_shouter(BP_TOOL.IS_RESET)\n if response == BP_TOOL.ACK:\n return False\n elif response == BP_TOOL.IS_RESET:\n return True\n else:\n return False", "def _is_acknowledged(self):\n response = self._port_handle.read(1)\n if len(response) == 0:\n raise DfuException('DFU did not send the answer.')\n else:\n if response != self.__RESPONSE['ack']:\n print('dfu answered nack (0x{})'.format(response.hex()))\n return response == self.__RESPONSE['ack']", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def ack(self):\n return (self.status == self.STATUS_ACK)", "def EndOfPacket(self) -> bool:", "def socket_status(self, sock_num: int) -> bool:\n if not self._send_parse_reply(b\"AT+CIPCLOSE?\", b\"+CIPCLOSE:\", idx=sock_num):\n return False\n if not self._buf == 1:\n return False\n return True", "def should_reconnect(self):\n if not self.by_remote:\n if self.code == 1006:\n if self.reason == 'Abnormal closure':\n return True\n \n return False", "def hasReset(self, p_int): # real signature unknown; restored from __doc__\n return False", "def connection_closed(self):\n return self.conn_status == self.CONN_CLOSED", "def retry_if_resetpeer_or_timeout(exception):\n return not ((not isinstance(exception, requests_exceptions.ConnectionError)\n and not isinstance(exception, requests_exceptions.ConnectTimeout))\n and not isinstance(exception, BadStatusLine or exception.errno == errno.ECONNRESET))", "def successful(self):\n return (self.power_ack & self.datarate_ack & self.channelmask_ack) == 1", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def test_should_return_the_correct_integer(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_int(), 18)", "def _check_for_life_signs(self):\n self._lock.acquire()\n if not self._running.is_set():\n return False\n try:\n if self._writes_since_check == 0:\n self.send_heartbeat()\n if self._reads_since_check == 0:\n self._threshold += 1\n if self._threshold >= 2:\n self._running.set()\n message = (\n 'Connection dead, no heartbeat or data received in >= '\n '%ds' % (\n self._interval * 2\n )\n )\n why = AMQPConnectionError(message)\n if 
self._exceptions is None:\n raise why\n self._exceptions.append(why)\n return False\n else:\n self._threshold = 0\n finally:\n self._reads_since_check = 0\n self._writes_since_check = 0\n self._lock.release()\n if self._timer:\n self._start_new_timer()\n return True", "def _is_connection_stale(self):\n\n if time.time() - self.last_ping > HEART_BEAT_PING_TIME:\n self._ping()\n\n return (time.time() - self.last_pong) > HEART_BEAT_PING_TIME + HEART_BEAT_PONG_TIME", "def broken(self) -> bool:\n return self.pgconn.status == ConnStatus.BAD and not self._closed", "def is_reset(self):\n return self._tag == 'reset'", "def check_heartbeat(self):\n return True", "def x_overrun(self):\n return (self.status & 0x10) != 0" ]
[ "0.7516386", "0.7045244", "0.69934404", "0.67368025", "0.6609426", "0.64979655", "0.6353278", "0.62783927", "0.62588733", "0.6225317", "0.60794926", "0.60549366", "0.5991976", "0.5967841", "0.5873444", "0.577649", "0.5690989", "0.56898624", "0.5684263", "0.56341535", "0.5559772", "0.5558609", "0.55305326", "0.5522089", "0.55151963", "0.54820526", "0.54560286", "0.5452284", "0.5445628", "0.5430576" ]
0.8491029
0
Does the current packet have the TCP PSH flag set?
def tcp_psh(self): return self.tcp_flags & dpkt.tcp.TH_PUSH != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def _is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def is_valid_ssdp_packet(data: bytes) -> bool:\n return (\n bool(data)\n and b\"\\n\" in data\n and (\n data.startswith(b\"NOTIFY * HTTP/1.1\")\n or data.startswith(b\"M-SEARCH * HTTP/1.1\")\n or data.startswith(b\"HTTP/1.1 200 OK\")\n )\n )", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def packetCheck(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n MagicNo = int.from_bytes(info[0], \"big\")\n PacketType = int.from_bytes(info[1], \"big\")\n RequestType = int.from_bytes(info[2], \"big\")\n if MagicNo != 0x497E:\n return False\n if PacketType != 0x0001:\n return False\n if RequestType != 0x0001 and RequestType != 0x0002:\n return False\n return True", "def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True", "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def is_icmp(self) -> bool:\n return self.proto == ICMP", "def _check_has_ping(data):\r\n return re.match(\r\n r'^PING :tmi\\.twitch\\.tv$', data)", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def is_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() == get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def master_filter(self, pkt):\n return (IP in pkt and pkt[IP].src == self.receiver and GBN in pkt\n and ICMP not in pkt)", "def __check_ssh(self):\n sfcs = self.sshTunnelDict[\"target_ip\"]\n\n cmd = \"ps aux | grep ssh | awk '{print $20}'\"\n result = subprocess.Popen(cmd,\n shell= True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = result.communicate()\n if sfcs not in stdout.decode():\n return False\n else: return True", "def add_packet(self, packet: Dict[str, Any]) -> bool:\n if (\n packet['tcp_header']['source_port'] == self.source_port and\n packet['tcp_header']['destination_port'] == self.destination_port and\n packet['ip_header']['source_address'] == self.source_address and\n packet['ip_header']['destination_address'] == self.destination_address\n ):\n self.packets.append((TCPStream.INBOUND, packet))\n return True\n\n if (\n packet['tcp_header']['source_port'] == self.destination_port and\n packet['tcp_header']['destination_port'] == self.source_port and\n packet['ip_header']['source_address'] == self.destination_address and\n packet['ip_header']['destination_address'] == self.source_address\n\n ):\n self.packets.append((TCPStream.OUTBOUND, packet))\n return True\n\n return False", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def tcp_cwr(self):\n return self.tcp_flags & dpkt.tcp.TH_CWR != 0", "def handle_packet(self, srcif, packet) -> bool:\n typeOfPacket = packet[\"type\"]\n if typeOfPacket == DATA:\n return self.forward(srcif, packet)\n elif typeOfPacket == DUMP:\n return self.dump(packet)\n elif typeOfPacket == UPDT:\n return self.update(srcif, packet)\n elif typeOfPacket == RVKE:\n return self.revoke(packet)\n else:\n return False", "def is_terminal(p):\n return isinstance(p, _TerminalPacket)", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def 
has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def _check_tcprelay(self):\n check = 'ps -e -opid -ocommand | grep tcprelay | grep -v ' \\\n 'grep | grep {0}'.format(self.locationid_param)\n output, _ = subprocess.Popen(\n [\"bash\", \"-c\", check], stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True).communicate()\n regex = re.search(\n r\"^\\s*([0-9]+).* --portoffset ([0-9]+).*\", output)\n match = regex.groups()\n relaypid = int(match[0])\n portoffset = int(match[1])\n logger.debug(\n \"CHECK TCPRELAY - CMD: {0} \"\n \"OUTPUT: {1} PID: {2} PORT: {3}\".format(check,\n output,\n relaypid,\n portoffset))\n return relaypid, portoffset", "def is_prepared(self):\n try:\n ret = (\n self.is_prepared_for_input_socket() and\n self.is_prepared_for_setting() and\n self.is_prepared_for_hoge()\n )\n except CheckPreparedException as e:\n ret = False\n e.log()\n # raise\n return ret", "def is_not_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() != get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "async def ping_ssh(self) -> bool:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # ping port 22 (SSH)\n if await self.ping(22):\n # ping returned true, SSH is up\n return True\n else:\n # ping returned false, SSH is down\n return False", "def has_pending_packets_to_be_sent(self):\n return self.num_packets != 0", "def EndOfPacket(self) -> bool:", "def ping(self):\n\t\t## NOTE: the Microblaze can only accept byte values between -128 and 127 (so 0xCF is too large)\n\t\trb = [0x00]\n\n\t\t# self.spi.transfer([0xCF], rb, 1)\n\t\t# mapped_cmd_byte = [_map_value(0xCF, 0, 255, -128, 127)]\n\t\tmapped_cmd_byte = [0xCF-128]\n\t\tself.spi.transfer(mapped_cmd_byte, rb, 1)\n\n\t\ttime.sleep(0.1)\n\t\tif rb[0] < 0: \t\t\t\t\t\t## Account for implicit unsigned-to-signed \n\t\t\trb[0] += 256\t\t\t\t\t## conversion from the transfer operation\n\t\treturn rb[0] == 0xF3" ]
[ "0.70700914", "0.683914", "0.65811044", "0.6559025", "0.6541669", "0.6536006", "0.6391766", "0.62555486", "0.6231639", "0.6139662", "0.6119093", "0.6116591", "0.61071205", "0.6080696", "0.6056388", "0.604276", "0.6009697", "0.5996681", "0.59924656", "0.5970526", "0.5923695", "0.5915584", "0.5914429", "0.5902292", "0.5898594", "0.58725524", "0.58718693", "0.5796703", "0.5783322", "0.5739184" ]
0.8219081
0
Does the current packet have the TCP ACK flag set?
def tcp_ack(self): return self.tcp_flags & dpkt.tcp.TH_ACK != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ack(self):\n return (self.status == self.STATUS_ACK)", "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def __is_ack(self, ack) -> bool:\n return ack == ['void']", "def _is_acknowledged(self):\n response = self._port_handle.read(1)\n if len(response) == 0:\n raise DfuException('DFU did not send the answer.')\n else:\n if response != self.__RESPONSE['ack']:\n print('dfu answered nack (0x{})'.format(response.hex()))\n return response == self.__RESPONSE['ack']", "def ack_required(self):\n v = self[22]\n v = v >> 1\n return (v & 0b1) != 0", "def ack(self):\n the_tuple = self.deque.ack(self.tube, self.task_id)\n\n self.update_from_tuple(the_tuple)\n\n return bool(self.state == 3)", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def test_should_build_a_simple_tcp_flags_with_ack_bit_enabled(self):\n\n tcp_flags = TCPControlBits(['ACK'])\n assert_equal(tcp_flags.control_bits['ACK'], 1)", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def ACK_IN(self, pkt):\n \n #This is a function that checks whether the received ACK is within the boundaries\n def in_window(num):\n cond_1 = (self.unack + self.receiver_win) >= num >= self.unack\n cond_2 = ((self.unack + self.receiver_win) >= 2**self.n_bits) and (((self.unack + self.receiver_win) % 2**self.n_bits) >= num) \n return (cond_1 or cond_2)\n \n \"\"\"State for received ACK.\"\"\"\n # check if type is ACK\n if pkt.getlayer(GBN).type == 0:\n log.error(\"Error: data type received instead of ACK %s\", pkt)\n raise self.SEND()\n else:\n log.debug(\"Received ACK %s\", pkt.getlayer(GBN).num)\n if(self.Q_3_4):\n if(self.CWND < self.ssthresh):\n self.CWND_fp += 1.0\n self.CWND = math.floor(self.CWND_fp)\n self.effective_window = min(self.CWND,self.win,self.receiver_win)\n self.CWND_data.append(self.CWND_fp)\n log.debug(\"CWND log (from ACK_in): %s. \", self.CWND_data)\n else:\n self.CWND_fp = self.CWND_fp + (1.0 / self.CWND)\n self.CWND = math.floor(self.CWND_fp)\n self.effective_window = min(self.CWND,self.win,self.receiver_win)\n self.CWND_data.append(self.CWND_fp)\n log.debug(\"CWND log (from ACK_in): %s. 
\", self.CWND_data)\n #Set the receiver window size to the received window value\n self.receiver_win = pkt.getlayer(GBN).win\n #Set the ack number to the received value\n ack = pkt.getlayer(GBN).num\n if(not in_window(ack)):\n log.error(\"Error: ACK received is out-of-window %s, discard!\", pkt)\n #Create a sender buffer key list for easier manipulation\n Sender_buffer_keys = list(self.buffer.keys())\n \n #Detect the index of the key value that is ack'ed, and erase everything coming beforehand\n if ((ack-1)%2**self.n_bits) in Sender_buffer_keys:\n index = Sender_buffer_keys.index((ack-1)%2**self.n_bits)\n Sender_buffer_keys = Sender_buffer_keys[:index+1]\n log.debug(\"Packet numbers deleted from the sender buffer %s\", Sender_buffer_keys)\n for i in Sender_buffer_keys:\n del self.buffer[i]\n log.debug(\"The new sender buffer %s\", list(self.buffer.keys()))\n\n #New unack is the latest ack number\n self.unack = ack\n\n #Question 3.2, detect duplicate acks and go into the transmission state when necessary\n if(self.Q_3_2 or self.Q_3_4): \n #implement a queue for checking duplicated ack\n self.dup_ack.append(ack)\n log.debug(\"duplicated ack buffer is %s\", self.dup_ack)\n if(len(self.dup_ack) >= 3):\n if(self.dup_ack[-1] == self.dup_ack[-2] == self.dup_ack[-3]):\n self.dup_ack.clear()\n log.debug(\"Duplicate ACKs for sequence number %s\", ack)\n if(self.Q_3_2):\n self.dup_ack_hanjing = True\n self.retransmit_flag = True\n if(self.Q_3_4):\n self.ssthresh = self.CWND / 2\n self.CWND_fp = self.ssthresh\n self.CWND = math.floor(self.CWND_fp)\n self.effective_window = min(self.CWND,self.win,self.receiver_win)\n self.CWND_data.append(self.CWND_fp)\n log.debug(\"CWND log: %s. \", self.CWND_data)\n raise self.SEND()\n \n #Q3_3 optional field list construction\n if(self.SACK == 1 and (pkt.getlayer(GBN).options == 1) and (pkt.getlayer(GBN).hlen >6)):\n #Get the header length\n self.hlen = pkt.getlayer(GBN).hlen\n #Pull optional field parameters\n if(self.hlen >= 9):\n self.ledge1 = pkt.getlayer(GBN).ledge1\n self.len1 = pkt.getlayer(GBN).len1\n if(self.hlen >= 12):\n self.ledge2 = pkt.getlayer(GBN).ledge2\n self.len2 = pkt.getlayer(GBN).len2\n if(self.hlen == 15):\n self.ledge3 = pkt.getlayer(GBN).ledge3\n self.len3 = pkt.getlayer(GBN).len3\n self.retransmit_flag = True\n raise self.SEND()\n\n\n # back to SEND state\n raise self.SEND()", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def check_ack_or_nak(message):\n value = message.body[-1]\n\n if value == 0x06:\n return\n elif value == 0x15:\n raise CommandFailure(command_code=message.command_code)\n else:\n raise RuntimeError(\"Unexpected ACK/NAK value (0x%02x)\" % value)", "def EndOfPacket(self) -> bool:", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def handle_packet(self, pkt):\n logger.info('got a message:{}'.format(pkt))\n self._sock_rep_to_server.send_pyobj(packet.Ack())\n \n state = True\n extradata = {}\n \n if hasattr(self, 'handle_action'):\n _tmp = self.handle_action(pkt)\n try:\n state, data = _tmp\n extradata['extra'] = data\n except ValueError:\n extradata['extra'] = _tmp\n if extradata:\n state = False\n \n return state, extradata", "def test_process_packet_ack(self):\n pkt = {'type': 'ack',\n 'ackId': 140,\n 'endpoint': '',\n 'args': []}\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called", "def has_pending_packets_to_be_sent(self):\n return self.num_packets != 0", "def 
_is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def no_more_acks() -> bool:\n return not any(not op.is_set() for op in self._pending_operations.values())", "def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]", "def is_connected(self):\n if self.connected and self.connack_rec:\n return 1\n return 0", "def ack(self):\n content = self._content \n log('content:'+content)\n self.__tcp_send(content)", "def successful(self):\n return (self.power_ack & self.datarate_ack & self.channelmask_ack) == 1", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def _handle_ok_ack(string):\n if string.strip() == Parser.OK_MSG:\n return True\n return False", "def acknowledge(self, validity = True) -> int:\n (data, s) = self.socket.recvfrom(Rudp.Packet.buffer())\n (packet, validity) = Rudp.Packet.unpack(data)\n if(validity and s == self.server):\n return packet.ack\n else:\n return None", "def isConfirmedDataUp(self):\n return self.mhdr.mtype == CO_DATA_UP", "def check_up(self):\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(BusController.PULSE_DELAY * 0.8)\n output = True\n start_time = time.time()\n try:\n s.connect((self.__address[0], BusController.HEART_BEAT_PORT))\n data = \"Check\".encode()\n s.send(data)\n data = s.recv(1024).decode()\n if int(data) != int(self.__id):\n print(f\"bus had the wrong ID\\nwas supposed to be {self.__id}, but received {data}\")\n output = False\n # listen for an answer\n except Exception as e:\n print(f\"exception in check_up (heart_beat) look:{e}\")\n print(f\"something went wrong, couldn't establish connection with {self.__address}\")\n output = False\n s.close()\n return output", "def no_ack(self):\n\n return self._block.tx_policies[self._lvap.addr].no_ack" ]
[ "0.7713082", "0.7140416", "0.7077276", "0.69622517", "0.68137753", "0.651649", "0.6490735", "0.64123046", "0.6396964", "0.63797945", "0.6331648", "0.63295096", "0.63076216", "0.62877494", "0.62774557", "0.62618184", "0.6235229", "0.6172736", "0.6160738", "0.6155097", "0.61437863", "0.6083535", "0.60732543", "0.60704935", "0.60688365", "0.60150087", "0.5930498", "0.5887783", "0.58675104", "0.58583796" ]
0.82345355
0
Does the current packet have the TCP URG flag set?
def tcp_urg(self): return self.tcp_flags & dpkt.tcp.TH_URG != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def EndOfPacket(self) -> bool:", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def _is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def tcp_cwr(self):\n return self.tcp_flags & dpkt.tcp.TH_CWR != 0", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def overrun(self):\n return (self.status & 0x80) != 0", "def would_retransmit(self):\n return not self.my_pending_requests.is_empty()", "def z_overrun(self):\n return (self.status & 0x40) != 0", "def x_overrun(self):\n return (self.status & 0x10) != 0", "def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def has_connection_down(tile):\n return is_kth_bit_set(tile, 4)", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def _check_packet(self, packet):\n src, dst = self._parse_packet_src_dst(packet)\n tcp = get_ip_packet(packet.load).data\n if tcp.flags & dpkt.tcp.TH_RST:\n if (src, dst) in self._last_tcp_seq:\n del self._last_tcp_seq[(src, dst)]\n else:\n if not tcp.data: raise BadPacket(\"no payload\")\n if (src, dst) in self._last_tcp_seq:\n last_seq = self._last_tcp_seq[(src, dst)]\n if tcp.seq <= last_seq:\n # this exception eliminates dups\n raise BadPacket(\"This sequence(%d<=%d) seen before\" % (tcp.seq, last_seq))\n self._last_tcp_seq[(src, dst)] = tcp.seq", "def master_filter(self, pkt):\n return (IP in pkt and pkt[IP].src == self.receiver and GBN in pkt\n and ICMP not in pkt)", "def my_frame(self):\n first_byte_as_bits = bin(int(self.packet_body[self.reader], base=16))[2:].zfill(8)\n return first_byte_as_bits[0:3] == \"001\"", "def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True", "def data_available(self):\n return (self.status & 0x08) != 0", "def isUnconfirmedDataUp(self):\n return self.mhdr.mtype == UN_DATA_UP", "def is_not_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() != get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def has_connection_up(tile):\n return is_kth_bit_set(tile, 2)", "def no_excessive_retrans():\n test_str = DEBUG_IGNORE + \"r3tr4n5m15510ns~~~~~~~\\n\"\n server = start_server(reference=True)\n client = start_client()\n\n # Send a segment to reference server, which should ignore it. 
See how many\n # times it was sent.\n write_to(client, test_str)\n segments = read_segments_from(server)\n if not segments or len(segments) != 6:\n return False\n\n # All segments should have the same content.\n orig_segment = segments[0]\n for segment in segments:\n if (\n segment.source != orig_segment.source or\n segment.source_port != orig_segment.source_port or\n segment.dest != orig_segment.dest or\n segment.dest_port != orig_segment.dest_port or\n segment.seqno != orig_segment.seqno or\n segment.ackno != orig_segment.ackno or\n segment.length != orig_segment.length or\n not segment.has_same_flags(orig_segment) or\n segment.window != orig_segment.window or\n segment.checksum != orig_segment.checksum\n ):\n return False\n\n return True", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def is_ringing(self) -> bool:", "def is_valid_ssdp_packet(data: bytes) -> bool:\n return (\n bool(data)\n and b\"\\n\" in data\n and (\n data.startswith(b\"NOTIFY * HTTP/1.1\")\n or data.startswith(b\"M-SEARCH * HTTP/1.1\")\n or data.startswith(b\"HTTP/1.1 200 OK\")\n )\n )", "def packetCheck(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n MagicNo = int.from_bytes(info[0], \"big\")\n PacketType = int.from_bytes(info[1], \"big\")\n RequestType = int.from_bytes(info[2], \"big\")\n if MagicNo != 0x497E:\n return False\n if PacketType != 0x0001:\n return False\n if RequestType != 0x0001 and RequestType != 0x0002:\n return False\n return True" ]
[ "0.701477", "0.6874184", "0.6629805", "0.6626216", "0.65771645", "0.65479213", "0.65354097", "0.63849026", "0.6335349", "0.6176544", "0.60870785", "0.60784733", "0.604032", "0.60195976", "0.5977013", "0.59698975", "0.59458387", "0.5925076", "0.5887721", "0.587131", "0.582429", "0.5752833", "0.5752445", "0.57430476", "0.5732796", "0.57278967", "0.5720955", "0.5712851", "0.5711298", "0.56859505" ]
0.78980297
0
Does the current packet have the TCP ECE flag set?
def tcp_ece(self): return self.tcp_flags & dpkt.tcp.TH_ECE != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def EndOfPacket(self) -> bool:", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def tcp_cwr(self):\n return self.tcp_flags & dpkt.tcp.TH_CWR != 0", "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def validate_encryption(self) -> bool:\n # Receive the first encrypted message from server\n message = self.receive()\n if message != Message.HI:\n print(\"Encryption error! Closing this socket...\")\n return False\n # Send the first encrypted message\n self.send(Message.HI)\n # Receive the encrypted OK message\n message = self.receive()\n if message == Message.OK:\n print(\"Encryption is established.\")\n return True\n else:\n print(\"Encryption error! Closing this socket...\")\n return False", "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def _is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]", "def have_cdc() -> bool:", "def _is_acknowledged(self):\n response = self._port_handle.read(1)\n if len(response) == 0:\n raise DfuException('DFU did not send the answer.')\n else:\n if response != self.__RESPONSE['ack']:\n print('dfu answered nack (0x{})'.format(response.hex()))\n return response == self.__RESPONSE['ack']", "def iseod(self):\n\n return self.byte_ptr >= len(self.data)", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def is_icmp(self) -> bool:\n return self.proto == ICMP", "def packetCheck(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n MagicNo = int.from_bytes(info[0], \"big\")\n PacketType = int.from_bytes(info[1], \"big\")\n RequestType = int.from_bytes(info[2], \"big\")\n if MagicNo != 0x497E:\n return False\n if PacketType != 0x0001:\n return False\n if RequestType != 0x0001 and RequestType != 0x0002:\n return False\n return True", "def ecssa_verify(ec: EC, hf, m: bytes, P: Point, sig: ECSS) -> bool:\n\n # this is just a try/except wrapper\n # _ecssa_verify raises Errors\n try:\n return _ecssa_verify(ec, hf, m, P, sig)\n except Exception:\n return False", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def is_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() == get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def upstream_fec_enable(self):\n return self._packet.get('upstream-fec-enable', False)", "def is_not_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() != get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def is_connected(self) -> bool:\n try:\n # When MSG_PEEK is used the data is treated as 
unread\n # and the next recv shall still return this data\n data = self.socket.recv(self.BUFFER_SIZE, socket.MSG_PEEK)\n if len(data) == 0:\n return False\n return True\n except ConnectionResetError:\n return False", "def isConfirmedDataUp(self):\n return self.mhdr.mtype == CO_DATA_UP", "def downstream_fec_enable(self):\n return self._packet.get('downstream-fec-enable', False)", "def isTCPRunningStartup():\r\n\r\n time.sleep(0.5)\r\n logs = open(\"Client1.txt\", 'r')\r\n #print(logs.readlines()[2])\r\n line = logs.readlines()[2]\r\n logs.close()\r\n print(line)\r\n isRunning = line != 'iperf3: error - unable to connect to server: Cannot assign requested address\\n'\r\n print(isRunning)\r\n return isRunning", "def isEncAddress(key):\n\tif re.search('^EAddr38[a-km-zA-HJ-NP-Z0-9]{56}$', key):\n\t\tif checkChecksum(key) is False:\n\t\t\treturn True, 'checksum'\n\t\treturn True, 'good'\n\telse:\n\t\treturn False, 'not valid'" ]
[ "0.6844938", "0.68352866", "0.63678133", "0.62086725", "0.6162233", "0.61555636", "0.6047426", "0.6042629", "0.5913607", "0.58997405", "0.5880692", "0.5828626", "0.58223116", "0.58136815", "0.5796791", "0.578385", "0.57391435", "0.5718324", "0.56315637", "0.55973816", "0.55971795", "0.55971545", "0.557584", "0.5571279", "0.55350554", "0.5524909", "0.54845667", "0.5459068", "0.54565644", "0.5438298" ]
0.8452496
0
Does the current packet have the TCP CWR flag set?
def tcp_cwr(self): return self.tcp_flags & dpkt.tcp.TH_CWR != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def EndOfPacket(self) -> bool:", "def _is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def overrun(self):\n return (self.status & 0x80) != 0", "def would_retransmit(self):\n return not self.my_pending_requests.is_empty()", "def data_available(self):\n return (self.status & 0x08) != 0", "def has_connection_down(tile):\n return is_kth_bit_set(tile, 4)", "def master_filter(self, pkt):\n return (IP in pkt and pkt[IP].src == self.receiver and GBN in pkt\n and ICMP not in pkt)", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def isconnected(self):\n return self._wlan.isconnected()", "def is_ctrl_message(self):\n return self._id < 0", "def is_connected(self):\n if self.connected and self.connack_rec:\n return 1\n return 0", "def is_not_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() != get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def is_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() == get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False", "def is_connected(self) -> bool:\n try:\n # When MSG_PEEK is used the data is treated as unread\n # and the next recv shall still return this data\n data = self.socket.recv(self.BUFFER_SIZE, socket.MSG_PEEK)\n if len(data) == 0:\n return False\n return True\n except ConnectionResetError:\n return False", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def have_cdc() -> bool:", "def socket_status(self, sock_num: int) -> bool:\n if not self._send_parse_reply(b\"AT+CIPCLOSE?\", b\"+CIPCLOSE:\", idx=sock_num):\n return False\n if not self._buf == 1:\n return False\n return True", "def canSend(self):\n return self._lte.isconnected()", "def getIsConnected(self):\n if self._socket == None:\n return False\n\n # Assume we are still connected. TODO: Do a test receive?\n return True", "def is_connected(self):\r\n return self.__socket is not None", "def cca(self):\n return self._current_rx_count == 0", "def z_overrun(self):\n return (self.status & 0x40) != 0", "def write_acceptable(self):\n return self.outstanding_wcount == 0" ]
[ "0.6661722", "0.6609961", "0.64189065", "0.6226935", "0.61659545", "0.61400944", "0.6062638", "0.6010988", "0.59357214", "0.584563", "0.5844437", "0.5778082", "0.57590437", "0.57227826", "0.56914073", "0.567778", "0.56738836", "0.5669728", "0.5668987", "0.5657275", "0.5636204", "0.5627518", "0.562317", "0.56025964", "0.5598656", "0.5559593", "0.5543385", "0.5527957", "0.55026305", "0.5489784" ]
0.85348696
0
Passed a TCP flags object (hex) and return 1 if it contains a TCP SYN and no other flags
def _is_tcp_syn(tcp_flags): if tcp_flags == 2: return 1 else: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def test_should_return_the_correct_integer(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_int(), 18)", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def test_should_return_a_integer(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_int(), int)", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def SocketFlags(self) -> SocketFlags:", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def test_should_return_the_correct_binary(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_bin(), '0b10010')", "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def test_should_return_the_correct_hexadecimal(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_hex(), '0x12')", "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def test_should_return_the_correct_string(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_str(), '00010010')", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def test_should_return_a_hexadecimal(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_hex(), str)", "def test_should_return_a_binary(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_bin(), str)", "def test_should_build_a_simple_tcp_flags_with_ack_bit_enabled(self):\n\n tcp_flags = TCPControlBits(['ACK'])\n assert_equal(tcp_flags.control_bits['ACK'], 1)", "def tcp_cwr(self):\n return self.tcp_flags & dpkt.tcp.TH_CWR != 0", "def test_should_return_a_str(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_str(), str)", "def __process_flags(self, flags: int) -> Dict[str, bool]:\n return {\n 'ns': True if flags & 0x100 else False,\n 'cwr': True if flags & 0x080 else False,\n 'ece': True if flags & 0x040 else False,\n 'urg': True if flags & 0x020 else False,\n 'ack': True if flags & 0x010 else False,\n 'psh': True if flags & 0x008 else False,\n 'rst': True if flags & 0x004 else False,\n 'syn': True if flags & 0x002 else False,\n 'fin': True if flags & 0x001 else False,\n }", "def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }", "def isTCPRunningStartup():\r\n\r\n time.sleep(0.5)\r\n logs = open(\"Client1.txt\", 'r')\r\n #print(logs.readlines()[2])\r\n line = logs.readlines()[2]\r\n logs.close()\r\n print(line)\r\n isRunning = line != 'iperf3: error - unable to connect to server: Cannot assign requested address\\n'\r\n print(isRunning)\r\n return isRunning", "def test_should_return_a_list(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_list(), list)", "def TCP_SYN_scan(**args):\n dport = 
args['dport']\n sport = RandShort()\n dest_ip = args['dest_ip']\n timeout = args['timeout']\n if 'sport' in args:\n sport = args['sport']\n # Send Syn Scan.\n ss_res = sr1( IP(dst=dest_ip) /TCP(sport=sport,dport=dport,flags=\"S\"),timeout=timeout)\n # Handle response.\n if(str(type(ss_res))==\"<type 'NoneType'>\"):\n return ScanResult.Filtered\n elif(ss_res.haslayer(TCP)):\n if(ss_res.getlayer(TCP).flags == 0x12):\n # Send RST.\n i = send(IP(dst=dest_ip)/TCP(sport=sport,dport=dport,flags=\"R\"))\n return ScanResult.Open\n elif (ss_res.getlayer(TCP).flags == 0x14):\n return ScanResult.Closed\n # Control ICMP response.\n elif(ss_res.haslayer(ICMP)):\n if(int(ss_res.getlayer(ICMP).type)==3 and int(ss_res.getlayer(ICMP).code) in [1,2,3,9,10,13]):\n return ScanResult.Filtered\n return ScanResult.Unknown", "def tcp_reassembly(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n bufid=(\n ipaddress.ip_address(ip.src), # source IP address\n ipaddress.ip_address(ip.dst), # destination IP address\n tcp.sport, # source port\n tcp.dport, # destination port\n ),\n num=count, # original packet range number\n ack=tcp.ack, # acknowledgement\n dsn=tcp.seq, # data sequence number\n syn=bool(tcp.flags.S), # synchronise flag\n fin=bool(tcp.flags.F), # finish flag\n rst=bool(tcp.flags.R), # reset connection flag\n payload=bytearray(bytes(tcp.payload)), # raw bytearray type payload\n )\n raw_len = len(tcp.payload) # payload length, header excludes\n data['first'] = tcp.seq # this sequence number\n data['last'] = tcp.seq + raw_len # next (wanted) sequence number\n data['len'] = raw_len # payload length, header excludes\n return True, data\n return False, None", "def get_tcp_packet_payload_len_with_options(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - ip.hl * 4 - 20", "def getflag(self, flag):\n\t\treturn (pservlet.pipe_get_flags(self._pipe_desc) & flag) != 0", "def test_should_return_a_dict(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_dict(), dict)", "def convert(dec, flags=None):\n final = []\n if flags is None:\n flags = TCP_FLAG_DICT\n for i in flags.keys():\n if (dec >= int(i)):\n dec = dec - int(i)\n final.append(flags[i])\n return final" ]
[ "0.80560696", "0.69553995", "0.684681", "0.6703667", "0.64354026", "0.64024794", "0.6313623", "0.6251084", "0.62157923", "0.61875", "0.61465245", "0.61416644", "0.61377066", "0.6108251", "0.5836498", "0.5831984", "0.5767398", "0.56820065", "0.5621239", "0.5521131", "0.54844314", "0.54235786", "0.5371507", "0.53640765", "0.52717966", "0.5239146", "0.52183", "0.5216239", "0.5213642", "0.51869035" ]
0.82989943
0
Passed a TCP flags object (hex) and return 1 if it contains TCP SYN + ACK flags and no other flags
def _is_tcp_synack(tcp_flags): if tcp_flags == 0x12: return 1 else: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_tcp_syn(tcp_flags):\n if tcp_flags == 2:\n return 1\n else:\n return 0", "def test_should_return_the_correct_integer(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_int(), 18)", "def test_should_return_a_integer(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_int(), int)", "def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0", "def test_should_return_the_correct_binary(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_bin(), '0b10010')", "def test_should_return_the_correct_hexadecimal(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_hex(), '0x12')", "def tcp_syn(self):\n return self.tcp_flags & dpkt.tcp.TH_SYN != 0", "def test_should_return_the_correct_string(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_equal(tcp_flags.to_str(), '00010010')", "def tcp_psh(self):\n return self.tcp_flags & dpkt.tcp.TH_PUSH != 0", "def test_should_build_a_simple_tcp_flags_with_ack_bit_enabled(self):\n\n tcp_flags = TCPControlBits(['ACK'])\n assert_equal(tcp_flags.control_bits['ACK'], 1)", "def tcp_ece(self):\n return self.tcp_flags & dpkt.tcp.TH_ECE != 0", "def SocketFlags(self) -> SocketFlags:", "def test_should_return_a_hexadecimal(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_hex(), str)", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def tcp_urg(self):\n return self.tcp_flags & dpkt.tcp.TH_URG != 0", "def test_should_return_a_binary(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_bin(), str)", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0", "def test_should_return_a_str(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_str(), str)", "def __process_flags(self, flags: int) -> Dict[str, bool]:\n return {\n 'ns': True if flags & 0x100 else False,\n 'cwr': True if flags & 0x080 else False,\n 'ece': True if flags & 0x040 else False,\n 'urg': True if flags & 0x020 else False,\n 'ack': True if flags & 0x010 else False,\n 'psh': True if flags & 0x008 else False,\n 'rst': True if flags & 0x004 else False,\n 'syn': True if flags & 0x002 else False,\n 'fin': True if flags & 0x001 else False,\n }", "def test_should_return_a_list(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_list(), list)", "def tcp_cwr(self):\n return self.tcp_flags & dpkt.tcp.TH_CWR != 0", "def test_should_return_a_dict(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_dict(), dict)", "def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }", "def reassemble(self) -> Optional[Dict[str, Any]]:\n # This is really crude, just make sure that we get a SYN -> SYN/AC -> ACK, then a FIN -> FIN/ACK -> ACK\n state: Dict[str, Dict[str, Optional[str]]] = {\n TCPStream.INBOUND: {\n 'syn': None,\n 'fin': 
None,\n },\n TCPStream.OUTBOUND: {\n 'syn': None,\n 'fin': None,\n }\n }\n sequence = {\n TCPStream.INBOUND: 0,\n TCPStream.OUTBOUND: 0,\n }\n\n def other_direction(direction: str) -> str:\n if direction == TCPStream.INBOUND:\n return TCPStream.OUTBOUND\n else:\n return TCPStream.INBOUND\n\n # Crude state machine to ensure that every SYN was ack'd and every FIN was ack'd. Should probably\n # also check that SYNs are ack'd before FINs but whatever, it works well enough for now.\n for packet in self.packets:\n direction = packet[0]\n other = other_direction(direction)\n syn = packet[1]['tcp_header']['flags']['syn']\n fin = packet[1]['tcp_header']['flags']['fin']\n ack = packet[1]['tcp_header']['flags']['ack']\n seq = packet[1]['tcp_header']['sequence']\n\n if syn:\n if state[direction]['syn'] is None:\n state[direction]['syn'] = 'sent'\n sequence[direction] = seq\n if fin:\n if state[direction]['fin'] is None:\n state[direction]['fin'] = 'sent'\n if ack:\n if state[other]['syn'] == 'sent':\n state[other]['syn'] = 'ackd'\n if state[other]['fin'] == 'sent':\n state[other]['fin'] = 'ackd'\n\n if (\n state[TCPStream.INBOUND]['syn'] == 'ackd' and\n state[TCPStream.INBOUND]['fin'] == 'ackd' and\n state[TCPStream.OUTBOUND]['syn'] == 'ackd' and\n state[TCPStream.OUTBOUND]['fin'] == 'ackd'\n ):\n # This stream is finished, can be reassembled\n data = {\n TCPStream.INBOUND: b'',\n TCPStream.OUTBOUND: b'',\n }\n\n def add_data(packet: bytes, data: bytes, offset: int) -> bytes:\n length = len(data)\n\n if len(packet) < offset:\n # Pad out, then add\n packet = packet + b'\\0' * (offset - len(packet))\n return packet + data\n if len(packet) == offset:\n # Add to end\n return packet + data\n if len(packet) > offset and len(packet) <= (offset + length):\n # Truncate, then add\n packet = packet[:offset]\n return packet + data\n if len(packet) > (offset + length):\n before = packet[:offset]\n after = packet[offset + length:]\n return before + data + after\n\n raise Exception('Logic error!')\n\n for packet in self.packets:\n dir = packet[0]\n syn = packet[1]['tcp_header']['flags']['syn']\n fin = packet[1]['tcp_header']['flags']['fin']\n ack = packet[1]['tcp_header']['flags']['ack']\n seq = packet[1]['tcp_header']['sequence']\n\n if syn:\n continue\n\n # Figure out what this packet has\n length = len(packet[1]['data'])\n position = seq - sequence[dir] - 1\n\n if length > 0:\n data[dir] = add_data(data[dir], packet[1]['data'], position)\n\n return {\n 'source_address': self.source_address,\n 'destination_address': self.destination_address,\n 'source_port': self.source_port,\n 'destination_port': self.destination_port,\n TCPStream.INBOUND: data[TCPStream.INBOUND],\n TCPStream.OUTBOUND: data[TCPStream.OUTBOUND],\n }\n\n return None", "def tcp_reassembly(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n bufid=(\n ipaddress.ip_address(ip.src), # source IP address\n ipaddress.ip_address(ip.dst), # destination IP address\n tcp.sport, # source port\n tcp.dport, # destination port\n ),\n num=count, # original packet range number\n ack=tcp.ack, # acknowledgement\n dsn=tcp.seq, # data sequence number\n syn=bool(tcp.flags.S), # synchronise flag\n fin=bool(tcp.flags.F), # finish flag\n rst=bool(tcp.flags.R), # reset connection flag\n payload=bytearray(bytes(tcp.payload)), # raw bytearray type payload\n )\n raw_len = len(tcp.payload) # payload length, header excludes\n data['first'] = tcp.seq # this sequence number\n 
data['last'] = tcp.seq + raw_len # next (wanted) sequence number\n data['len'] = raw_len # payload length, header excludes\n return True, data\n return False, None", "def handle_tcp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n sequence_num = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n acknowledgment = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n data_offset = int(pkt[start_point], 16) * 4\r\n start_point += 2\r\n flags = pkt[start_point:start_point+2]\r\n flags_str = \"\"\r\n for f in flags:\r\n flags_str += str(format(int(f), '04b'))\r\n start_point += 2\r\n window_size = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n start_point += 4\r\n urgent_pointer = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n options = int((2 * packets[i][0][0] - start_point)/2)\r\n\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(sequence_num)\r\n packets[i][2].append(acknowledgment)\r\n packets[i][2].append(data_offset)\r\n packets[i][2].append(flags_str)\r\n packets[i][2].append(window_size)\r\n packets[i][2].append(checksum_value)\r\n packets[i][2].append(urgent_pointer)\r\n packets[i][2].append(options)\r\n return packets", "def test_should_have_all_bits_zeroed_because_there_is_no_valid_flag(self):\n tcp_flags = TCPControlBits(['AAA', 'BBB', 'CCC'])\n assert_false(any(tcp_flags.control_bits.values()))", "def process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[saddr, sport, daddr, dport][co.S2C] >= 0:\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[saddr, sport, daddr, dport][co.S2C]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.S2C][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n # If SOCKS command\n if size_payload == 7 and connections[conn_id].attr.get(co.SOCKS_PORT, None) is None:\n crypted_socks_cmd = tcp.data\n # This is possible because of packet stripping\n if len(crypted_socks_cmd) == 7:\n decrypted_socks_cmd = socks_parser.decode(crypted_socks_cmd)\n if decrypted_socks_cmd[0] == b'\\x01': # Connect\n connections[conn_id].attr[co.SOCKS_DADDR] = socks_parser.get_ip_address(decrypted_socks_cmd)\n connections[conn_id].attr[co.SOCKS_PORT] = socks_parser.get_port_number(decrypted_socks_cmd)\n\n if size_payload > 0 and tcp.seq in acks[saddr, sport, daddr, dport][SEQ_C2S]:\n # This is a retransmission! 
(take into account the seq overflow)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[saddr, sport, daddr, dport][SEQ_C2S].add(tcp.seq)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[saddr, sport, daddr, dport][SEQ][co.C2S]) >= 3000000:\n# for x in range(50000):\n# acks[saddr, sport, daddr, dport][SEQ][co.C2S].popleft()\n\n acks[saddr, sport, daddr, dport][co.S2C] = tcp.ack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta", "def convert(dec, flags=None):\n final = []\n if flags is None:\n flags = TCP_FLAG_DICT\n for i in flags.keys():\n if (dec >= int(i)):\n dec = dec - int(i)\n final.append(flags[i])\n return final" ]
[ "0.80400455", "0.7250333", "0.6964621", "0.6685191", "0.6563002", "0.6505053", "0.6482697", "0.64025676", "0.6371088", "0.6360817", "0.62162447", "0.61951274", "0.61621755", "0.61382747", "0.6079574", "0.6032509", "0.6024667", "0.5861897", "0.5688515", "0.56081784", "0.5578663", "0.54791594", "0.5458707", "0.54559803", "0.54459023", "0.5426781", "0.5336177", "0.53309834", "0.52953905", "0.5279169" ]
0.8173602
0
Generate a predictable hash for the 5tuple which is the same not matter which direction the traffic is travelling
def _hash_5tuple(ip_A, ip_B, tp_src, tp_dst, proto): if ip_A > ip_B: direction = 1 elif ip_B > ip_A: direction = 2 elif tp_src > tp_dst: direction = 1 elif tp_dst > tp_src: direction = 2 else: direction = 1 hash_5t = hashlib.md5() if direction == 1: flow_tuple = (ip_A, ip_B, tp_src, tp_dst, proto) else: flow_tuple = (ip_B, ip_A, tp_dst, tp_src, proto) flow_tuple_as_string = str(flow_tuple) hash_5t.update(flow_tuple_as_string) return hash_5t.hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash_function(input_tuple):\n return hash(input_tuple)", "def hash_flow(flow_5_tuple):\n ip_A = flow_5_tuple[0]\n ip_B = flow_5_tuple[1]\n tp_src = flow_5_tuple[2]\n tp_dst = flow_5_tuple[3]\n proto = flow_5_tuple[4]\n if proto == 6:\n #*** Is a TCP flow:\n if ip_A > ip_B:\n direction = 1\n elif ip_B > ip_A:\n direction = 2\n elif tp_src > tp_dst:\n direction = 1\n elif tp_dst > tp_src:\n direction = 2\n else:\n direction = 1\n else:\n #*** Isn't a flow, so arbitrarily set direction as 1:\n direction = 1\n if direction == 1:\n flow_tuple = (ip_A, ip_B, tp_src, tp_dst, proto)\n else:\n #*** Flip direction:\n flow_tuple = (ip_B, ip_A, tp_dst, tp_src, proto)\n return hash_tuple(flow_tuple)", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def HashAlgorithm(self) -> _n_7_t_0:", "def hash_tuple(hash_tuple):\n hash_result = hashlib.md5()\n tuple_as_string = str(hash_tuple)\n hash_result.update(tuple_as_string)\n return hash_result.hexdigest()", "def __hash__(self):\n token = \"\"\n for gamePiece in self.game_pieces:\n token = token + str(gamePiece.x) + str(gamePiece.y)\n \n hash_ = int(token) % 100000\n return hash_", "def hash(self) -> str:\r\n ...", "def __hash__(self):\n x = xxhash.xxh64()\n x.update(self.puzzle)\n return x.intdigest()", "def get_hash_str():\r\n\tli = \"\"\r\n\tfor i in range(5):\r\n\t\tli += str(int(int((6 * random.random()) + 1)))\r\n\treturn li", "def generate_hash(*args):\n key = bytes(' '.join(args), 'utf_8')\n hashh = hashlib.md5()\n hashh.update(key)\n return hashh.hexdigest()", "def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result", "def hash(self) -> bytes:", "def __hash__(self):\n hash_value = 0\n \n # approximate_online_count\n hash_value ^= self.approximate_online_count\n \n # approximate_user_count\n hash_value ^= self.approximate_user_count << 12\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # discovery_splash\n hash_value ^= hash(self.discovery_splash)\n \n # emojis\n emojis = self.emojis\n hash_value ^= len(emojis) << 1\n for emoji in emojis.values():\n hash_value ^= hash(emoji)\n \n # features\n features = self.features\n hash_value ^= len(features) << 5\n for feature in features:\n hash_value ^= hash(feature)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # invite_splash\n hash_value ^= hash(self.invite_splash)\n \n # stickers\n stickers = self.stickers\n hash_value ^= len(stickers) << 9\n for sticker in stickers.values():\n hash_value ^= hash(sticker)\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def get_hash(self):\n return \"%03d_%03d_%03d\" % (self.chest_region, self.chest_type, self.feature_type)", "def calculate_hash_id(self):\n return get_md5_hash(f'{self.type}{self.get_primary_id()}')", "def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def __hash__(self):\n return 31 * 
hash(self.head_vertex) + hash(self.tail_vertex) + hash(self.weight)", "def __hash__(self):\n return 31 * hash(self.head_vertex) + hash(self.tail_vertex) + hash(self.weight)", "def hash(x):\r\n return (randint(1,5*c)*x + randint(1,5*c))%c", "def seed_hash(*args):\n args_str = str(args)\n return int(hashlib.md5(args_str.encode(\"utf-8\")).hexdigest(), 16) % (2**31)", "def __hash__(self) -> int:\n # The hash is based on the graph topology and node and edge attributes.\n return hash(\n (\n tuple(self.nodes),\n tuple(self.edges),\n tuple([str(self.nodes[n]) for n in self.nodes]),\n tuple([str(self.edges[i, j]) for i, j in self.edges]),\n )\n )", "def HashValue(self) -> _n_0_t_3[_n_0_t_9]:", "def hash_key(self):", "def __hash__(self):\n return hash(str(self.xCoordinate) + '_' + str(self.yCoordinate))", "def create_hash() -> str:\n length = 6\n char = string.ascii_uppercase + string.digits + string.ascii_lowercase\n\n # Generate a new ID, until one is found that is unique\n while True:\n hash = \"\".join(random.choice(char) for _ in range(length))\n\n if not utils.cache_is_hash_taken(hash):\n return hash", "def IsomorphicHash(self) -> int:\n # The hash is based on the nodes and edges, not their attributes.\n return hash((tuple(self.nodes), tuple(self.edges)))", "def __hash__(self):\n return hash(self.base_location) ^ hash(self.fold_path) ^ hash(self.field)", "def __hash__(self):\n return hash((self._nele, self._m_s))" ]
[ "0.74605143", "0.7353813", "0.724694", "0.7163682", "0.7123714", "0.6989677", "0.6989158", "0.69504875", "0.69009036", "0.6895851", "0.68934995", "0.6888084", "0.68834597", "0.6875137", "0.6846202", "0.6814702", "0.68129855", "0.6799603", "0.6789932", "0.6789932", "0.67869276", "0.6694766", "0.6675768", "0.6668163", "0.6667768", "0.66659737", "0.66647077", "0.6637375", "0.66369", "0.66368484" ]
0.78120553
0
Convert a MAC address to a readable/printable string
def _mac_addr(address): return ':'.join('%02x' % ord(b) for b in address)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mac_ntoa(mac):\n return '%.2x:%.2x:%.2x:%.2x:%.2x:%.2x' % tuple(map(ord, list(mac)))", "def mac_addr(address):\n\tprint(':'.join('%02x' % compat_ord(b) for b in address))\n\treturn ':'.join('%s' % format(compat_ord(b), '0>8b') for b in address)", "def get_mac_string():\n mac_int = getnode()\n mac_str = ':'.join((\"%012x\" % mac_int)[i:i + 2] for i in range(0, 12, 2))\n return mac_str", "def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)", "def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)", "def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)", "def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)", "def eth_addr(a):\n if isinstance(a, bytes):\n a = a.decode(\"latin\")\n string = \"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\"\n mac = string % (ord(a[0]), ord(a[1]), ord(a[2]),\n ord(a[3]), ord(a[4]), ord(a[5]))\n return mac", "def _get_mac_address():\n if not sys.platform.startswith('linux'):\n raise RuntimeError(\n 'Cannot get the MAC address on non-Linux platforms'\n )\n ifname = get_default_iface_name_linux()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n info = fcntl.ioctl(s.fileno(), 0x8927,\n struct.pack('256s', bytes(ifname, 'utf-8')[:15]))\n return ''.join('%02x' % b for b in info[18:24])", "def bytes_to_mac_str(buff):\n if len(buff) != DataDescription.B_SEQ_MAC_LEN:\n raise TypeError(\"Invalid input\")\n return \"%02X:%02X:%02X:%02X:%02X:%02X\" % buff", "def get_mac(self) -> str:\n hex_mac = hexlify(self.message)[160:172].decode().upper()\n return (\n hex_mac[0:2]\n + \":\"\n + hex_mac[2:4]\n + \":\"\n + hex_mac[4:6]\n + \":\"\n + hex_mac[6:8]\n + \":\"\n + hex_mac[8:10]\n + \":\"\n + hex_mac[10:12]\n )", "def mac_address(self):\n mac = [\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff)\n ]\n return ':'.join(map(lambda x: f\"{x:02X}\", mac))", "def __get_mac_address(self):\n str_hex_mac = uuid.UUID(int=uuid.getnode()).hex[-12:]\n return str_hex_mac", "def get_mac(self, node_id):\n nc = '%02x' % self.node_class\n nr_iface = '%02x' % self.nr_host_interface\n node_id = '%08x' % node_id\n\n return '%s:%s:%s:%s:%s:%s' % (nc, nr_iface, node_id[0:2], node_id[2:4], node_id[4:6], node_id[6:8])", "def mac_pton(s):\n return binascii.unhexlify(s.replace(\":\", \"\"))", "def mac_aton(str):\n macbytes = [int(i, 16) for i in str.split(':')]\n return struct.pack('6B', *macbytes)", "def get_mac_address(ifname):\n try:\n return open('/sys/class/net/' + ifname + '/address') \\\n .readline().strip()\n except:\n SysTools.logger.error(\"Failed to get mac-address of %s\", ifname)\n return \"00:00:00:00:00:00\"", "def mac_ntop(binary):\n x = b\":\".join(binascii.hexlify(binary)[i : i + 2] for i in range(0, 12, 2))\n return str(x.decode(\"ascii\"))", "def mac_address(self) -> str:\n return self._device.mac", "def create_magic_packet(macaddress: str) -> bytes:\n if len(macaddress) == 17:\n sep = macaddress[2]\n macaddress = macaddress.replace(sep, \"\")\n elif len(macaddress) == 14:\n sep = macaddress[4]\n macaddress = macaddress.replace(sep, \"\")\n if len(macaddress) != 12:\n raise ValueError(\"Incorrect MAC address format\")\n return bytes.fromhex(\"F\" * 12 + macaddress * 16)", "def get_mac_addr(bytes):\n bytes_str = map('{:02x}'.format, bytes)\n mac_addr = ':'.join(bytes_str).upper()\n return mac_addr", "def get_mac(self) 
-> str:\n self.sendline(\"iw {} info\".format(self.iface_dut))\n # We are looking for MAC definition of STA\n # wdev 0x1\n # addr 96:4e:c9:cc:7a:2c\n # type managed\n self.expect(\"addr (?P<mac>..:..:..:..:..:..)\\r\\n\\t(type|ssid)\")\n return self.match.group('mac')", "def mac_from_vm(vm: libvirt.virDomain = None) -> str:\n doc = minidom.parseString(vm.XMLDesc())\n interfaces = doc.getElementsByTagName('mac')\n return interfaces[0].getAttribute('address')", "def macFor(cls, board):\n return cls.MAC_PREFIX + '{:02X}'.format(int(board))", "def mac_string(self) :\n\t\tif not getattr(self,'pkg02') :\n\t\t\treturn None \n\t\tif self.pkg02[:2] not in ['01','02','04','05' ] :\n\t\t\treturn None\n\t\treturn self.pkg02 + ' ' + self.p_fields.mac_string()", "def get_mac_address():\n eth0_interface = 'eth0'\n addresses = netifaces.ifaddresses(eth0_interface)[netifaces.AF_LINK][0]\n mac_address = addresses['addr']\n return mac_address", "def emulab_mac(mac):\n\n return \"\".join(mac.lower().split(':'))", "def bt_addr_to_str(bt_addr):\n return \":\".join([b.encode(\"hex\") for b in bt_addr])", "def _get_mac_address(self, mac_numbers):\n\n mac = \"\"\n for num in mac_numbers:\n num = self._convert_to_hex(num)\n mac = ':'.join((mac, num))\n mac = mac[1:]\n return mac", "def ether_atob(pretty):\n addr = \"\"\n for i in 0, 3, 6, 9, 12, 15:\n addr += \"%c\" % int(pretty[i:i+2], 16)\n return addr" ]
[ "0.8274901", "0.78023726", "0.7799624", "0.7411498", "0.7411498", "0.7411498", "0.7411498", "0.7393615", "0.72612065", "0.7243841", "0.7214834", "0.71748555", "0.7040925", "0.69946724", "0.6982126", "0.6982003", "0.6913715", "0.67323697", "0.671812", "0.6642322", "0.6640873", "0.6632942", "0.66163164", "0.65952176", "0.6562063", "0.6545116", "0.654056", "0.6491276", "0.6487346", "0.6487143" ]
0.7819506
1
Action partial update test
def test_partial_update(self): action = ActionFactory.create(id=22) data = { 'name': 'Ação para Melhorar', 'institution': 'Vamos Ajudar', } self.assertNotEqual(action.name, data['name']) self.assertNotEqual(action.institution, data['institution']) response = self.client.patch(reverse('action-detail', args=[23]), data=data) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) response = self.client.patch(reverse('action-detail', args=[22]), data=data) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['name'], data['name']) self.assertEqual(response.data['institution'], data['institution'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_client_partial_update(self):\n pass", "def test_partial_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n\n payload = {\n 'name': 'Updated name',\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.patch(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')", "def partial_update(self,request,pk = None):\r\n\r\n return Response({'HTTP method':'PATCH'})", "def test_partial_update(self):\n doctor = DoctorFactory.create(id=22)\n data = {'name': 'Joe'}\n self.assertNotEqual(doctor.name, data['name'])\n\n response = self.unath_client.patch(reverse('doctor-detail', args=[22]), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.patch(reverse('doctor-detail', args=[22]), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)", "def partial_update(self,request,pk= None):\n return Response({'http_method':'PATCH'})", "def test_update_case(self):\n pass", "def test_update_one(self):\n pass", "def partial_update(self, request, pk=None): #partial update a specific object\n return Response({'http_method': 'PATCH'})", "def test_teams_partial_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def partial_update(self, request, pk=None):\n return Response({'http_method':'PATCH'})", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_scenario(self):\n pass", "def test_ipam_vrfs_partial_update(self):\n pass", "def test_partial_update_should_not_be_allowed(self):\n response = self.client.patch(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_client_verification_document_partial_update(self):\n pass", "def partial_update(self, request, pk=None):\n\n return Response({'http_method':'PATCH'})", "def test_client_nationlity_partial_update(self):\n pass", "def test_partial_update_recipe(self):\n recipe = sample_recipe()\n original_description = recipe.description\n payload = {'name': 'Panqueques con 
dulce de leche'}\n\n url = recipe_detail_url(recipe.id)\n res = self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(recipe.name, payload['name'])\n self.assertEqual(recipe.description, original_description)", "def test_ipam_rirs_partial_update(self):\n pass", "def test_client_risk_assessment_partial_update(self):\n pass", "def test_partial_update_movie(self):\n movie = sample_movie(user=self.user)\n movie.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name='Hentai')\n\n payload = {'title': 'Test movie partial check', 'tags': [new_tag.id]}\n url = detail_url(movie.id)\n self.client.patch(url, payload)\n\n movie.refresh_from_db()\n self.assertEqual(movie.title, payload['title'])\n tags = movie.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)", "def test_user_update_request(self):\n pass", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tag.add(sample_tag(user=self.user))\n recipe.ingredient.add(sample_ingredient(user=self.user))\n new_tag = sample_tag(user=self.user,name='curry')\n payload = {\n 'title':'chicken tikka recipe',\n 'tag' : [new_tag.id]\n }\n url = detail_url(recipe.id)\n res = self.client.patch(url,payload)\n recipe.refresh_from_db();\n self.assertEqual(recipe.title,payload['title'])\n self.assertEqual(len(recipe.tag.all()),1)\n self.assertIn(new_tag,recipe.tag.all())" ]
[ "0.7925501", "0.7703755", "0.7453596", "0.7446067", "0.74410087", "0.7411792", "0.7282139", "0.7279346", "0.72573346", "0.72541976", "0.7236859", "0.7230735", "0.7230735", "0.7230735", "0.7216292", "0.7194296", "0.7185747", "0.71731967", "0.7138343", "0.71002716", "0.70846397", "0.70832336", "0.7064351", "0.70639086", "0.70248175", "0.7012725", "0.700241", "0.7000315", "0.6992846", "0.69601333" ]
0.8542736
0
Initializing the board and current player.
def __init__(self): self.board = [ BS, BS, BS, BS, BS, BS, BS, BS, BS, BS, BS, EM, EM, EM, WS, WS, WS, WS, WS, WS, WS, WS, WS, WS, WS ] self.curr_player = WHITE_PLAYER
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initBoard(self):\n pass", "def initialize_board(self):\n self.board = np.zeros(shape=(BOARD_SIZE, BOARD_SIZE), dtype=np.int) # another way of defining board: [[for x in range(cm.BOARD_SIZE)] for x in range(cm.BOARD_SIZE)]\n center = int(BOARD_SIZE / 2)\n self.board[center-1][center-1] = self.board[center][center] = WHITE # place the board according to position\n self.board[center][center-1] = self.board[center-1][center] = BLACK\n self.black_piece = 2\n self.white_piece = 2", "def __init__(self):\n # Current player\n self.player = X\n\n # Board\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]\n\n # Winner\n self.winner = None\n\n # Game over\n self._gameover = False", "def __init__(self):\n self._game_state = \"UNFINISHED\"\n self._current_player = \"BLACK\"\n self._game_board = Board()", "def __init__(self):\n self.board_dict = dict()\n for i in range(self.BOARD_WIDTH):\n for j in range(self.BOARD_WIDTH):\n self.board_dict[i, j] = 0, None\n\n self.players_locations = dict()\n self.last_moved = None", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def __init__(self, players):\n self.players = players\n self.board = Board()", "def __init__(self, player, board):\n self.player = player\n self.board = board", "def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9", "def __init__(self):\n self.game_screen = pygame.display.set_mode((GameData.screen_dim, GameData.screen_dim))\n self.game_screen.fill(GameData.background_color)\n self.player = 1\n self.game_over = False\n self.board = np.zeros((GameData.rows, GameData.columns))", "def initialize_board(self):\n seed = self.seed and self.seed.any()\n if not (self.shape or seed):\n raise Exception(\"Either a shape or a seed is required.\")\n\n elif self.shape and seed:\n # Center the seed on a game board\n board = self._center_seed(self.shape, self.seed)\n\n elif self.shape:\n # The probability a cell starts off dead\n prob_dead = [1 - self.game.weight]\n # Class probabilities for live cells\n probs_alive = [self.game.weight * (1/self.classes)] * self.classes\n\n board = np.random.choice(\n self.classes + 1,\n np.prod(self.shape),\n p = prob_dead + probs_alive\n ).reshape(self.shape)\n \n else: # Only a seed is given\n self.shape = self.seed.shape\n board = self.seed\n\n self.array = board\n self.start_array = board\n self.prev_array = None", "def setUp(self):\n\n self.board = Board(3, 3)", "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "def __init__(self, initial_board):\n self.initial_board = initial_board", "def __init__(self):\n\n self.__turn_info = { 'turn': ChessGame.WHITE }\n self.init_board()", "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)", "def __init__(self, board=None):\n self.winner = None\n self.board = board or [self.__class__.EMPTY_POSITION_COUNTER] * 9", "def __init__(self):\n self.board = Board()\n #self.player1 = player1\n #self.player2 = player2\n self.winner = None", "def setUp(self):\n 
self.gameBoard = Grid((100, 100), Cell)", "def initialize():\n\n global PLAYER # this means we use the global var PLAYER and cannot have a local var named PLAYER\n global LEVEL_COUNTER\n\n LEVEL_COUNTER = 1\n \n coordinates = generate_coords()\n\n PLAYER = Stark()\n tree = Tree()\n ww = WhiteWalker()\n crown = Crown()\n gray_gem = GrayGem()\n clear_board()\n GAME_BOARD.create(\"Snow\",\"Snow\")\n GAME_BOARD.draw_msg(\"Level \" + str(LEVEL_COUNTER) + \". Winter is coming.\")\n generate_level(coordinates, [PLAYER, ww, gray_gem, crown, tree, tree, gray_gem, tree, tree, gray_gem, tree])\n\n # for i in range(0,NUM_ELTS):\n # place_on_board(elts[i], coordinates[i][0], coordinates[i][1])", "def __init__(self, player):\n \n self.colour = player\n self.game_in_head = Board()", "def __init__(self):\n self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.last_move = None", "def initialize_board(self):\n self.board_values = {x:x for x in(range(1,10))}", "def setup(self):\n piece_order = ['ROOK','KNIGHT','BISHOP','QUEEN','KING','BISHOP','KNIGHT','ROOK']\n for row,colour in zip([0,7],['BLACK','WHITE']):\n for col,piece in enumerate(piece_order):\n self.board[row][col] = colour + '_' + piece\n \n for row,colour in zip([1,6],['BLACK','WHITE']):\n for i in range(8):\n self.board[row][i] = colour + '_' + 'PAWN'\n \n self.toplay = 'WHITE'", "def init_board(self):\n\n self.__board = dict()\n order = ['rook', 'knight', 'bishop', 'queen', 'king', 'bishop',\n 'knight', 'rook']\n for j, name in enumerate(order):\n\n self.__board[(0, j)] = ChessGame.Piece( name, ChessGame.WHITE)\n self.__board[(7, j)] = ChessGame.Piece( name, ChessGame.BLACK)\n self.__board[(1, j)] = ChessGame.Piece('pawn', ChessGame.WHITE)\n self.__board[(6, j)] = ChessGame.Piece('pawn', ChessGame.BLACK)\n\n self.__players = { ChessGame.WHITE: set(), ChessGame.BLACK: set() }\n for color in (ChessGame.BLACK, ChessGame.WHITE):\n self.__players[color] = {(x, y) for (x, y), piece in\n self.__board.iteritems() if piece.color == color }\n\n return", "def __init__(self):\r\n\t\tself.game_board = [['0','0','0'],['0','0','0'],['0','0','0']]\r\n\t\tself.count = 0\r\n\t\tself.x_turn = True\r\n\t\r\n\r\n\t\tpass", "def __init__(self):\r\n self._board = None\r\n self._bb_settings = Settings()\r\n self._screen = pygame.display.set_mode((self._bb_settings.screen_width,\r\n self._bb_settings.screen_height))\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)\r\n self._image = pygame.image.load('board.bmp')\r\n self._rect = self._image.get_rect()\r\n self._play_mode_button_list = self.make_play_mode_buttons()\r\n self._replay_button_list = self.make_replay_buttons()", "def __init__(self):\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]", "def initialize_board():\n # Wipe current board\n for x in range(len(THE_BOARD.positions)):\n for y in range(len(THE_BOARD.positions)):\n THE_BOARD.positions[x][y] = ' '\n\n all_pieces = []\n\n # Pawns\n white_pawns = [Pawn('white', (6, i)) for i in range(len(THE_BOARD.positions[6]))]\n black_pawns = [Pawn('black', (1, i)) for i in range(len(THE_BOARD.positions[1]))]\n all_pieces.extend(white_pawns)\n all_pieces.extend(black_pawns)\n\n # Rooks\n rook1 = Rook('black', (0, 0))\n all_pieces.append(rook1)\n rook2 = Rook('black', (0, 7))\n all_pieces.append(rook2)\n rook3 = Rook('white', (7, 0))\n all_pieces.append(rook3)\n rook4 = Rook('white', (7, 7))\n all_pieces.append(rook4)\n\n # Knights\n knight1 = 
Knight('black', (0, 1))\n all_pieces.append(knight1)\n knight2 = Knight('black', (0, 6))\n all_pieces.append(knight2)\n knight3 = Knight('white', (7, 1))\n all_pieces.append(knight3)\n knight4 = Knight('white', (7, 6))\n all_pieces.append(knight4)\n\n # Bishops\n bishop1 = Bishop('black', (0, 2))\n all_pieces.append(bishop1)\n bishop2 = Bishop('black', (0, 5))\n all_pieces.append(bishop2)\n bishop3 = Bishop('white', (7, 2))\n all_pieces.append(bishop3)\n bishop4 = Bishop('white', (7, 5))\n all_pieces.append(bishop4)\n\n # King and Queen\n queen1 = Queen('black', (0, 4))\n all_pieces.append(queen1)\n queen2 = Queen('white', (7, 4))\n all_pieces.append(queen2)\n king1 = King('black', (0, 3))\n all_pieces.append(king1)\n king2 = King('white', (7, 3))\n all_pieces.append(king2)\n\n # Add every single piece to the board. Only then can they update their spaces threatened\n for piece in all_pieces:\n THE_BOARD.update(piece)\n THE_BOARD.update_all_spaces_threatened()", "def __init__(self):\n self.__grid = create_grid(\n Settings.SIZE_X, Settings.SIZE_Y, MarkerType.NONE)\n\n self.__turn = 0\n self.__state = GameState.PLAYING\n self.__winner = MarkerType.NONE\n self.__loser = MarkerType.NONE\n\n # Separate counter for turns, because __turn depends on starting player\n self.__turns_played = 0" ]
[ "0.7926913", "0.749798", "0.741034", "0.7391423", "0.72973835", "0.72447056", "0.7236445", "0.7228515", "0.7197612", "0.7186307", "0.71519154", "0.71053326", "0.70884544", "0.7085167", "0.70822626", "0.70528626", "0.69869745", "0.6971108", "0.6967911", "0.6957216", "0.6941116", "0.6923901", "0.69153506", "0.69038254", "0.6899789", "0.6887272", "0.68858236", "0.6881707", "0.68599755", "0.6853681" ]
0.77943414
1
Calculating all the possible single moves.
def calc_single_moves(self): single_soldier_moves = [(i, j) for (i, j) in SOLDIER_SINGLE_MOVES[self.curr_player] if self.board[i][:1] == SOLDIER_COLOR[self.curr_player] and self.board[j] == EM] single_officer_moves = [(i, j) for (i, j) in OFFICER_SINGLE_MOVES if self.board[i][:1] == OFFICER_COLOR[self.curr_player] and self.board[j] == EM] return single_soldier_moves + single_officer_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exec_all_moves(self,level=0):\n\n capts = [self.op_capture_north, self.op_capture_nwest, self.op_capture_neast, self.op_capture_east, self.op_capture_west]\n jmps = [self.op_jump_north, self.op_jump_nwest, self.op_jump_neast]\n moves = [self.op_move_north,self.op_move_nwest, self.op_move_neast]\n result = []\n ops = []\n\n # Pre-select all operations that may be executed\n if self.next_move == self.FREE:\n capturingStarts, otherStarts = self.possible_capture()\n\n # Check for pieces that may capture\n if len(capturingStarts) > 0:\n self.next_move = self.CAPT\n self.next_pieces = capturingStarts\n else:\n self.next_move = self.FREE\n jmps.extend(moves)\n ops = jmps\n self.next_pieces = otherStarts\n\n elif self.next_move == self.CAPT:\n ops = capts\n elif self.next_move == self.JUMP:\n ops = jmps\n elif self.next_move == self.FREE:\n jmps.extend(moves)\n ops = jmps\n elif self.next_move == self.ADDPIECE_2:\n return self.op_add_piece_bot(self.next_pieces)\n\n # Execute possible operations for all viable pieces\n for pos in self.next_pieces:\n for op in ops:\n newState = op(pos)\n\n # Check if op succeeded\n if newState:\n\n # If the next player is the current player than the function is called recursevely, this is done so that outcomes account for successive plays by the same\n # player ( successive jumps and captures or piece addition)\n if newState.curr_player != self.curr_player:\n result.append(newState)\n else:\n result.extend(newState.exec_all_moves(level+1))\n\n\n return result", "def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, possible_move_list", "def get_possible_moves(self):\n moves = []\n for i in range(1, self.current_total + 1):\n if i ** 2 <= self.current_total:\n moves.append(i ** 2)\n\n return moves", "def possible_moves(self, piece):\n def _index(orig, off):\n \"\"\"Helper function to find the new index.\"\"\"\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)\n\n p_x, p_y = piece\n p_i = _index(piece, (0, 0))\n\n # pass a list of the four corners first for basic possibles\n move_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for i in [-1, 1] for j in [-1, 1]]\n possibles = self.squares[p_i].can_move(piece, move_land)\n\n # next append the new list from jumps\n jump_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for j in [-2, 2] for i in [-2, 2]]\n possibles += self.squares[p_i].can_jump(piece, move_land, jump_land)\n\n # clean out the list of duplicates, although there should be none\n return [m for i, m in enumerate(possibles) if m not in possibles[:i]]", "def get_next_moves1(self):\n moves = []\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\":\n next_board = copy.deepcopy(self.board)\n next_board[i][j] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n next_turn = get_opponent(self.turn)\n 
moves.append(DotsAndBoxesState(next_board, next_turn))\n return moves", "def moves(self):\n\n # define a full range, which we can compare against columns,\n # rows, or blocks. they're all the same when stored as sets.\n line = set(range(1, 10))\n moves = []\n\n # iterate every cell on the board\n for row in range(0, 9):\n for col in range(0, 9):\n\n # ignore this cell if it's already filled\n i = self._index(col, row)\n if self.data[i] is not None:\n continue\n\n # fetch the adjacent cells\n row_values = set(self._row(row))\n col_values = set(self._column(col))\n bck_values = set(self._block(col, row))\n\n # subtract the values present in the adjacent cells\n # (since this cell *can't* be of any of those values),\n # to leave the list of possibilities for this cell\n missing = line.difference(row_values, col_values, bck_values)\n\n # if there's only *one* possibility, we've found the\n # solution to this cell\n if len(missing) == 1:\n moves.append((col, row, missing.pop()))\n\n return moves", "def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves", "def get_valid_moves(self):\r\n # castling and en-passant rights are stored, because move affects these values\r\n temp_enpassant_possible = self.enpas_pos\r\n temp_castle = CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs)\r\n\r\n # for validating a possible move\r\n #1 all possibile moves are generated\r\n #2 each pos moves are made\r\n #3 generate opponent move\r\n #4 check if any of those moves let the king attacked\r\n #5 moves which let the king in chess are eliminated\r\n #6 the moves are undone\r\n moves = self.get_all_possible_moves() # 1\r\n\r\n # castle moves are directly introduced in valid moves\r\n if not self.turn_white:\r\n self.get_castle_moves(self.bKingPos[0], self.bKingPos[1], moves)\r\n else:\r\n self.get_castle_moves(self.wKingPos[0], self.wKingPos[1], moves)\r\n\r\n for i in range(len(moves) - 1, -1, -1): # 2\r\n self.make_move(moves[i])\r\n # 3 #4\r\n self.turn_white = not self.turn_white\r\n if self.in_check():\r\n moves.remove(moves[i]) # 5\r\n self.turn_white = not self.turn_white\r\n self.undo_move()\r\n\r\n # game ending possibilities\r\n if len(moves) == 0:\r\n if self.in_check():\r\n self.checkMate = True\r\n print(\"Checkmate !\")\r\n else:\r\n self.staleMate = True\r\n print(\"Stalemate !\")\r\n else:\r\n self.checkMate = False\r\n self.staleMate = False\r\n\r\n # the rigths are restored, and the values are not affected\r\n self.enpas_pos = temp_enpassant_possible\r\n self.cr_castle_r = temp_castle\r\n\r\n return moves", "def moves(self) -> List[List[PushState]]:\n # seen_moves = set()\n possible_moves = [[], []]\n\n if state := self.prune_states(self.state): # Could return None\n size = len(self.prune_states(state))\n else:\n return possible_moves\n\n for count, each_slot in enumerate(state):\n # for count, each_slot in reversed(list(enumerate(state))):\n if each_slot == \"L\" and not possible_moves[0]:\n next_state = deepcopy(state)\n next_state = tuple(self.push(list(next_state), count))\n next_state = self.prune_states(next_state)\n\n # if next_state not in seen_moves:\n # seen_moves.add(next_state)\n 
possible_moves[0].append(next_state)\n\n elif each_slot == \"R\" and not possible_moves[1]:\n next_state = deepcopy(state)\n next_state = tuple(self.push(list(next_state), count))\n next_state = self.prune_states(next_state)\n\n # if next_state not in seen_moves:\n # seen_moves.add(next_state)\n possible_moves[1].append(next_state)\n\n if possible_moves[0] and possible_moves[1]:\n break\n\n return possible_moves", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def solve (M, cpos, move): \n if move == 64:\n print (\"\\n\\nmove: \", move)\n print (\"sum: \", sum(M))\n pprint (M)\n 
#exit()\n for next in get_moves(cpos, M):\n solve(ulist(M, next, move+1), next, move+1)", "def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves", "def get_possible_moves(self, board: np.ndarray):\n board_size = board.shape[0]\n moves = []\n if abs(self.value) == 1:\n if self.start_row <= 2:\n directions = [np.array((1, -1)), np.array((1, 1))]\n else:\n directions = [np.array((-1, 1)), np.array((-1, -1))]\n else:\n directions = [np.array((-1, 1)), np.array((1, 1)), np.array((-1, -1)), np.array((1, -1))]\n for direction in directions:\n within_board = True\n i = 1\n while within_board:\n coord = self.coord + direction * i\n within_board = _check_if_position_on_board(coord, board_size)\n # break if first step is already out of board\n if not within_board:\n break\n value_board = board[coord[0], coord[1]]\n # break if there is a stone of them same player in the way\n if value_board < 0 and self.value < 0 or value_board > 0 and self.value > 0:\n break\n # if there is no stone, than add this to move list.\n if value_board == 0:\n moves += [{\"old_coord\": self.coord, \"new_coord\": coord, \"jumped_stones\": [], \"jumped_values\": 0,\n \"move_coords\": [coord]}]\n # if there is a stone of the enemy\n if (value_board < 0 < self.value) or (self.value < 0 < value_board):\n # check if it can be jumped\n coord_jump = coord + direction\n move_coords = [coord_jump.copy()]\n within_board_after_jump = _check_if_position_on_board(coord_jump, board_size)\n # break if place behind stone is out of border\n if not within_board_after_jump:\n break\n value_board_jump = board[coord_jump[0], coord_jump[1]]\n jumped_stones = []\n # break if there is no free place\n if value_board_jump != 0:\n break\n jumped_stones += [coord]\n moves_tmp = self.jump_chain(directions, board, coord_jump, value_board, jumped_stones, move_coords)\n if len(moves_tmp) > 0:\n moves += moves_tmp\n else:\n moves += [{\"old_coord\": self.coord, \"new_coord\": coord_jump, \"jumped_stones\": jumped_stones,\n \"jumped_values\": abs(value_board), \"move_coords\": [coord_jump]}]\n i += 1\n # break if normal stone, because they can only move one field\n if abs(self.value) == 1:\n break\n return moves", "def possibleMoves(self,i,j):\n piece = self.board[i][j].piece\n if(piece.pieceCode == \"None\"):\n return []\n \n if(piece.name == \"pawn\"):\n return self.pawnMoves(piece,self.board)\n elif(piece.name == \"king\"):\n return self.kingSteps(self.board,piece.color)\n else:\n return self.pieceMoves(piece,self.board)", "def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result", "def get_moves(self):", "def get_all_possible_moves(self):\r\n moves = []\r\n for i in range(8):\r\n for j in range(8):\r\n color = self.board[i][j][0]\r\n if (color == 'b' and not self.turn_white) or (color == 'w' and self.turn_white):\r\n p_type = self.board[i][j][1]\r\n if p_type == 'r':\r\n 
self.get_rook_moves(i, j, moves)\r\n elif p_type == 'k':\r\n self.get_king_moves(i, j, moves)\r\n elif p_type == 'q':\r\n self.get_queen_moves(i, j, moves)\r\n elif p_type == 'p':\r\n self.get_pawn_moves(i, j, moves)\r\n elif p_type == 'b':\r\n self.get_bishop_moves(i, j, moves)\r\n elif p_type == 'n':\r\n self.get_knight_moves(i, j, moves)\r\n return moves", "def moves(self):\n\n moves = list()\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n move = (row, col)\n\n if self.board[row][col] == 9:\n moves.append(move)\n\n if self.board[row][col] == 1 or self.board[row][col] == 2:\n\n move = (row - 1, col)\n\n if self.board[row - 1][col] == 1 or self.board[row - 1][col] == 2:\n\n pass\n\n else:\n\n moves.append(move)\n\n return moves", "def get_all_possible_moves(self, state):\n move_list = []\n done_finding_moves = False\n any_non_pass_moves = False\n while not done_finding_moves:\n try:\n m = next(self.move_generator) # Gets a (move, state) pair.\n # print(\"next returns: \",m[0]) # Prints out the move. For debugging.\n if m[0] != 'p':\n any_non_pass_moves = True\n move_list.append(m) # Add the move to the list.\n except StopIteration as e:\n done_finding_moves = True\n if not any_non_pass_moves:\n move_list.append(('p',state))\n return move_list", "def get_best_moves():\n game_data = set()\n\n def next_move(game):\n winner = game.get_winner()\n if winner is not None:\n return {winner}\n next_wins = set()\n future_wins = set()\n future_draws_and_wins = set()\n future_draws = set()\n results = set()\n for row, col in game.valid_spaces():\n next_game = game.move(row, col)\n next_results = next_move(next_game)\n results |= next_results\n if game.turn == 2 and next_game.get_winner() == 2:\n # If Player 2 can win with their move, reject this line\n return {2}\n elif game.turn == 1 and next_game.get_winner() == 1:\n # If Player 1 can win with their move, only accept these lines\n next_wins.add((tuple([tuple(row) for row in game.board]), row * 3 + col))\n elif game.turn == 1:\n if next_results == {1}:\n # Player 1 will only win in this future\n future_wins.add((tuple([tuple(row) for row in game.board]), row * 3 + col)) \n elif next_results == {0, 1}:\n # Player 1 could win or draw in this future\n future_draws_and_wins.add((tuple([tuple(row) for row in game.board]), row * 3 + col))\n elif next_results == {0}:\n # Player 1 could only draw in this future\n future_draws.add((tuple([tuple(row) for row in game.board]), row * 3 + col))\n # We only accept the draws if we don't have any just wins\n if game.turn == 2:\n return results\n\n if next_wins:\n game_data.update(next_wins)\n return {1}\n elif future_wins:\n game_data.update(future_wins)\n return {1}\n elif future_draws_and_wins:\n game_data.update(future_draws_and_wins)\n return {0, 1}\n elif future_draws:\n game_data.update(future_draws)\n return {0}\n return set()\n\n next_move(TTTGame())\n next_move(TTTGame(turn=2))\n return tuple(game_data)", "def actions(board):\n avail_moves = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n avail_moves.add((i,j))\n \n if len(avail_moves) == 0:\n return 0\n\n return avail_moves", "def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves", "def get_next_moves(board, player):\r\n\r\n if player == 'hare':\r\n moves = 
[]\r\n next_moves = []\r\n\r\n (row_from, col_from) = get_hare_positions(board)\r\n moves = possible_moves_list(row_from, col_from)\r\n\r\n for move in moves:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_from, col_from, row_to, col_to):\r\n \"\"\" if move is allowed then add to list of next moves\"\"\"\r\n next_moves.append(move)\r\n\r\n return next_moves\r\n\r\n else:\r\n \"\"\" for individual hounds\r\n get next moves\"\"\"\r\n moves = []\r\n next_moves_hound1 = []\r\n next_moves_hound2 = []\r\n next_moves_hound3 = []\r\n\r\n (row_hound_1, col_hound_1), (row_hound_2, col_hound_2), (row_hound_3, col_hound_3) = get_hound_positions(board)\r\n moves_hound1 = possible_moves_list(row_hound_1, col_hound_1)\r\n moves_hound2 = possible_moves_list(row_hound_2, col_hound_2)\r\n moves_hound3 = possible_moves_list(row_hound_3, col_hound_3)\r\n\r\n for move in moves_hound1:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_1, col_hound_1, row_to, col_to):\r\n next_moves_hound1.append(move)\r\n\r\n for move in moves_hound2:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_2, col_hound_2, row_to, col_to):\r\n next_moves_hound2.append(move)\r\n\r\n for move in moves_hound3:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_3, col_hound_3, row_to, col_to):\r\n next_moves_hound3.append(move)\r\n\r\n return (next_moves_hound1, next_moves_hound2, next_moves_hound3)", "def find_next_moves(self):\n # iterate through all cells, and group them with upper cells and left\n # cells\n\n # generate separated cells then merge the them with same neighbours\n matrix_rows = len(self.status)\n if matrix_rows == 0:\n matrix_cols = 0\n else:\n matrix_cols = len(self.status[0])\n matrix = []\n for i in range(matrix_rows):\n matrix.append([[(i, j)] for j in range(matrix_cols)])\n # merge coordinations\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if self.status[i][j] != '':\n # is same with right cell?\n if j < matrix_cols - 1 and self.status[i][j] == self.status[i][j + 1]:\n new_item = matrix[i][j] + matrix[i][j + 1]\n matrix[i][j] = matrix[i][j + 1] = new_item\n # is same with down cell?\n if i < matrix_rows - 1 and self.status[i][j] == self.status[i + 1][j]:\n new_item = matrix[i][j] + matrix[i + 1][j]\n matrix[i][j] = matrix[i + 1][j] = new_item\n\n # filter out all unvalid results\n result = []\n # filter out all single-cell groups\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if (len(matrix[i][j]) > 1 and\n matrix[i][j] not in result):\n result.append(matrix[i][j])\n\n # filter sublists\n result = sorted(result, key=len, reverse=True)\n changed = True\n while changed:\n changed = False\n for i in range(len(result)):\n for j in range(i + 1, len(result)):\n if set(result[i]).issuperset(set(result[j])):\n result.remove(result[j])\n changed = True\n break\n if changed:\n break\n\n if result:\n for i in result:\n yield (self.convert_coordinations(i),\n len(i) * len(i) * 5,\n self.calc_new_status(i))\n else:\n left_cells = sum([len(i) - i.count('') for i in self.status])\n left_cells_score = 2000 - 20 * left_cells * left_cells\n if left_cells_score < 0:\n left_cells_score = 0\n for i in self.parents:\n i.children[self] = [(i.children[self][0][0] + left_cells_score,\n i.children[self][0][1],\n i.children[self][0][2])]", "def possible_moves(self): \n return [a + 1 for a, b in enumerate(self.board) if b == 0]", "def generate_possible_moves_single_gp(self, 
game):\n for color in game.paths:\n path = game.paths[color]\n if path.is_complete():\n continue\n gp1, gp2 = path.get_grow_points()\n adj2gp1 = utils.get_adjacent_points(gp1)\n for possible in adj2gp1:\n # print \"Attempting\"\n # print game\n # print possible, path.can_be_added_to_path(possible, 1)\n if path.can_be_added_to_path(possible, 1):\n copy_game = deepcopy(game)\n \"\"\":type: Flow\"\"\"\n copy_game.paths[color].add_to_path(possible, 1)\n self.queue.append(copy_game)\n # print \"after attempt\"\n # print copy_game", "def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.turn]\n if self.turn == DEFENDER:\n allowed.extend((KING, CASTLE_OCCUPIED))\n it = np.nditer(self.board_state, flags=['multi_index'])\n while not it.finished:\n index = it.multi_index\n curr_loc = it[0]\n if curr_loc in allowed:\n moves.extend(self.get_legal_move_piece(curr_loc, index))\n it.iternext()\n return moves", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def run_each_move(root: State, state: GameState) -> State:\n for m, base_move in enumerate(root.moves):\n for _ in range(50):\n base_move = expand(base_move)\n outcome, _ = MCTS(base_move, state, False, 0)\n if outcome == state.player:\n base_move.winner()\n else:\n base_move.loser()\n root.attempts += 1\n root.moves[m] = base_move\n print(\"Move: \", base_move.idx)\n print(\" Wins: \", base_move.wins)\n update_ucbs(root)\n return root", "def get_move(self, board, possible_moves):\n next_move = None\n max_score = -float('Inf')\n self.start_time = datetime.now()\n for depth in range(2,3): # iterative deepening\n try:\n for move in possible_moves:\n board_copy = deepcopy(board)\n self.man.play_move(board_copy, move, self.color)\n score = self.minimaxm(depth, board, False)\n if score > max_score:\n max_score = score\n next_move = move\n\n except 
TimeoutError:\n print(\"ran out of time\")\n break\n return next_move" ]
[ "0.697692", "0.672974", "0.6712475", "0.66967314", "0.66270834", "0.66235846", "0.66208565", "0.6607919", "0.65993315", "0.65738255", "0.65693104", "0.65388745", "0.653464", "0.64876544", "0.64746815", "0.64696103", "0.64522076", "0.6426022", "0.641516", "0.63979876", "0.6394314", "0.6392142", "0.6381418", "0.6381379", "0.6355259", "0.6345586", "0.6301897", "0.6282665", "0.6280442", "0.6276055" ]
0.6921761
1
Calculating all the possible capture moves, but only the first step.
def calc_capture_moves(self): capture_soldier_moves = [(i, j, k) for (i, j, k) in SOLDIER_CAPTURE_MOVES[self.curr_player] if self.board[i][:1] == SOLDIER_COLOR[self.curr_player] and self.board[j][:1] in OPPONENT_COLORS[self.curr_player] and self.board[k][:1] == EM] capture_officer_moves = [(i, j, k) for (i, j, k) in OFFICER_CAPTURE_MOVES if self.board[i][:1] == OFFICER_COLOR[self.curr_player] and self.board[j][:1] in OPPONENT_COLORS[self.curr_player] and self.board[k][:1] == EM] return capture_soldier_moves + capture_officer_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_possible_moves(self):\n possible_capture_moves = self.calc_capture_moves()\n if possible_capture_moves:\n # There is at least one capture move. Let's DFS them!\n self_curr_player = self.curr_player\n next_moves = []\n for capture_move in possible_capture_moves:\n if self.board[capture_move[0]][:1] == SOLDIER_COLOR[self_curr_player]:\n next_moves += self.find_following_moves(capture_move, SOLDIER_CAPTURE_MOVES_FROM[self_curr_player])\n else:\n next_moves += self.find_following_moves(capture_move, OFFICER_CAPTURE_MOVES_FROM)\n\n return next_moves\n\n # There were no capture moves. We return the single moves.\n return self.calc_single_moves()", "def enumerate_moves(self):\n add_ew = lambda x: [x+'e', x+'w']\n allowed_catches = add_ew(self._directions[0])\n moves = []\n # First add the one/two step forward moves\n new_slot = self._board.get_dir(self._current_space, self._directions[0])\n if new_slot and new_slot.is_free():\n moves.append(ChessMove(self._current_space, new_slot))\n if (self._side == BLACK and new_slot.row == self._board.size - 1) or \\\n (self._side == WHITE and new_slot.row == 0):\n moves[-1].add_promotion()\n if (self._side == BLACK and self._current_space.row == 1) or \\\n (self._side == WHITE and self._current_space.row == self._board.size -2):\n new_slot = self._board.get_dir(new_slot, self._directions[0])\n if new_slot and new_slot.is_free():\n moves.append(ChessMove(self._current_space, new_slot))\n\n # Now add all the captures.\n for direction in allowed_catches:\n new_slot = self._board.get_dir(self._current_space, direction)\n if new_slot and new_slot.has_opponent(self._side):\n moves.append(ChessMove(self._current_space, new_slot, [new_slot]))\n if (self._side == BLACK and new_slot.row == self._board.size - 1) or \\\n (self._side == WHITE and new_slot.row == 0):\n moves[-1].add_promotion()\n return moves", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def get_next_moves1(self):\n moves = []\n for i in range(len(self.board)):\n for j in 
range(len(self.board[i])):\n if self.board[i][j] == \"\":\n next_board = copy.deepcopy(self.board)\n next_board[i][j] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n next_turn = get_opponent(self.turn)\n moves.append(DotsAndBoxesState(next_board, next_turn))\n return moves", "def exec_all_moves(self,level=0):\n\n capts = [self.op_capture_north, self.op_capture_nwest, self.op_capture_neast, self.op_capture_east, self.op_capture_west]\n jmps = [self.op_jump_north, self.op_jump_nwest, self.op_jump_neast]\n moves = [self.op_move_north,self.op_move_nwest, self.op_move_neast]\n result = []\n ops = []\n\n # Pre-select all operations that may be executed\n if self.next_move == self.FREE:\n capturingStarts, otherStarts = self.possible_capture()\n\n # Check for pieces that may capture\n if len(capturingStarts) > 0:\n self.next_move = self.CAPT\n self.next_pieces = capturingStarts\n else:\n self.next_move = self.FREE\n jmps.extend(moves)\n ops = jmps\n self.next_pieces = otherStarts\n\n elif self.next_move == self.CAPT:\n ops = capts\n elif self.next_move == self.JUMP:\n ops = jmps\n elif self.next_move == self.FREE:\n jmps.extend(moves)\n ops = jmps\n elif self.next_move == self.ADDPIECE_2:\n return self.op_add_piece_bot(self.next_pieces)\n\n # Execute possible operations for all viable pieces\n for pos in self.next_pieces:\n for op in ops:\n newState = op(pos)\n\n # Check if op succeeded\n if newState:\n\n # If the next player is the current player than the function is called recursevely, this is done so that outcomes account for successive plays by the same\n # player ( successive jumps and captures or piece addition)\n if newState.curr_player != self.curr_player:\n result.append(newState)\n else:\n result.extend(newState.exec_all_moves(level+1))\n\n\n return result", "def solveOneStep(self):\n ### Student code goes here\n if self.first_step == False:\n self.first_step = True\n if self.solveOneStep():\n return True\n if self.queue:\n self.gm_init()\n ele = self.queue.get()\n #print (len(ele))\n state = ele[0]\n premoves = ele[1]\n\n for m in premoves:\n self.gm.makeMove(m)\n if state.state == self.victoryCondition:\n return True\n self.visited[state] = True\n print(\"CURRENTSTATE:\")\n print(self.gm.getGameState())\n print(\"*******\")\n moves = self.gm.getMovables()\n for m in moves:\n self.gm.makeMove(m)\n if (((state.parent is not None) and (self.gm.getGameState() == state.parent.state))) or GameState(self.gm.getGameState(), 0, None) in self.visited:\n self.gm.reverseMove(m)\n continue\n self.visited[GameState(self.gm.getGameState(), 0, None)] = True\n new_pmv = [i for i in premoves]\n new_pmv.append(m)\n next_state = GameState(self.gm.getGameState(), state.depth+1, m)\n next_state.parent = state\n state.children.append(next_state)\n self.queue.put([next_state, new_pmv])\n self.gm.reverseMove(m)\n self.currentState = state\n\n #for i in range(len(premoves)-1, -1, -1):\n # mv = premoves[i]\n # self.gm.reverseMove(mv)\n return False", "def findPlacesToMove():\n movesDestinations = [];\n \n curY = curBlank[0];\n curX = curBlank[1];\n\n if(curY-1 >= 1): #UP\n movesDestinations.append((curY-1, curX));\n if(curY+1 <= n): #DOWN\n movesDestinations.append((curY+1, curX));\n if(curX-1 >= 1): #LEFT\n movesDestinations.append((curY, curX-1));\n if(curX+1 <= n): #RIGHT\n movesDestinations.append((curY, curX+1));\n \n return movesDestinations;", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n 
state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def decide_next_move(self):\n pass", "def find_following_moves(self, capture_move, move_privilege):\n # Temporarily changing the board, simulating the move and checking if there are more to follow.\n floating_piece = self.board[capture_move[1]]\n self.board[capture_move[1]] = EM\n self.board[capture_move[2]] = self.board[capture_move[0]]\n self.board[capture_move[0]] = EM\n\n next_moves = [(i, j, k) for (i, j, k) in move_privilege[capture_move[2]]\n if self.board[j][:1] in OPPONENT_COLORS[self.curr_player]\n and self.board[k][:1] == EM]\n\n def return_back_pieces():\n # Returning the board to its previous state\n self.board[capture_move[1]] = floating_piece\n self.board[capture_move[0]] = self.board[capture_move[2]]\n self.board[capture_move[2]] = EM\n\n if not next_moves:\n # This was the final move in a series of moves\n return_back_pieces()\n return [capture_move]\n\n possible_next_moves = []\n for next_move in next_moves:\n for move in self.find_following_moves(next_move, move_privilege):\n possible_next_moves.append(capture_move + move[1:])\n\n return_back_pieces()\n return possible_next_moves", "def firstMove(self):\n return (10, 10)", "def get_next_moves2(self):\n moves = []\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\" and self.move_makes_box(i, j):\n next_board = copy.deepcopy(self.board)\n next_board[i][j] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n next_turn = get_opponent(self.turn)\n moves.append(DotsAndBoxesState(next_board, next_turn))\n return moves", "def moves(self) -> List[List[PushState]]:\n # seen_moves = set()\n possible_moves = [[], []]\n\n if state := self.prune_states(self.state): # Could return None\n size = len(self.prune_states(state))\n else:\n return possible_moves\n\n for count, each_slot in enumerate(state):\n # for count, each_slot in 
reversed(list(enumerate(state))):\n if each_slot == \"L\" and not possible_moves[0]:\n next_state = deepcopy(state)\n next_state = tuple(self.push(list(next_state), count))\n next_state = self.prune_states(next_state)\n\n # if next_state not in seen_moves:\n # seen_moves.add(next_state)\n possible_moves[0].append(next_state)\n\n elif each_slot == \"R\" and not possible_moves[1]:\n next_state = deepcopy(state)\n next_state = tuple(self.push(list(next_state), count))\n next_state = self.prune_states(next_state)\n\n # if next_state not in seen_moves:\n # seen_moves.add(next_state)\n possible_moves[1].append(next_state)\n\n if possible_moves[0] and possible_moves[1]:\n break\n\n return possible_moves", "def move(self, state):\n result = None\n self.currentDepthLimit = 0\t\n\tself.transposition = {}\n\tself.counter = 0\n\n\twhile True:\n u = float(\"inf\")\n\t v = float(\"-inf\")\n\t self.counter = 0\n\t result = None\n\t self.transposition = {}\n\t for a in state.actions():\n new = self.min_value(state.result(a), float(\"-inf\"), float(\"inf\"),self.currentDepthLimit)\n\t if new > v:\n\t v = new\n\t result = a\n\n\t elif new == v:\n\t if a.index < result.index:\n\t result = a\n\t if self.is_time_up():\n\t return result\n\t \n\t self.currentDepthLimit += 1\n\t \"\"\"If we never use evaluate function, it means all state are terminated, so return whatever the result is\"\"\"\n\t if self.counter == 0:\n\t break\n\t if self.is_time_up():\n \t return result\n\treturn result", "def move_replay(self) -> Generator[List[List[str]], None, None]:\n current_moves = OrderedDict()\n for move in self.moves:\n current_moves[move] = self.moves[move]\n yield self.compile_board(current_moves)", "def get_move(self, board, possible_moves):\n next_move = None\n max_score = -float('Inf')\n self.start_time = datetime.now()\n for depth in range(2,3): # iterative deepening\n try:\n for move in possible_moves:\n board_copy = deepcopy(board)\n self.man.play_move(board_copy, move, self.color)\n score = self.minimaxm(depth, board, False)\n if score > max_score:\n max_score = score\n next_move = move\n\n except TimeoutError:\n print(\"ran out of time\")\n break\n return next_move", "def step(self, action):\n reward = 0\n pose_all = []\n self.rotor_speeds = np.array([action]*4)\n for _ in range(self.action_repeat):\n done = self.sim.next_timestep(self.rotor_speeds) # update the sim pose and velocities\n reward += self.get_reward()\n pose_all += [self.sim.pose]\n if self.sim.crashed:\n reward = -5\n done = True\n #if (np.square(self.sim.pose[:3] - self.target_pos)).sum() < 1: # Close enough!\n #done = True\n next_state = np.concatenate(pose_all)\n return next_state, reward, done", "def step(self, move):", "def movee(self):\n\n #return the initial state if he cant move and he's in the initial state\n if not self.move and self.index == 0:\n return self.path[self.index]\n\n #return the goal state if he's at the goal state\n if self.index == len(self.path):\n return self.path[-1]\n\n #return the next move and increments the index attribute\n nextMove = self.path[self.index]\n self.index += 1\n\n return nextMove", "def step(self, action):\n if np.abs(action[0][0]-action[1][0])==2:\n self.board[(action[0][0]+action[1][0])//2,(action[0][1]+action[1][1])//2]=0\n self.board[action[1]] = self.board[action[0]] \n if action[1][0]==0 or action[1][0]==7:\n self.board[action[1]] = 2*np.sign(self.board[action[0]])\n self.board[action[0]] = 0\n self.turn = (self.turn + 1)%2\n self.actions=[]\n for i in range(8):\n for j in range(8):\n if 
np.sign(self.board[i,j])==(-1)**self.turn:\n moves=(self.bdiag(i,j),self.badiag(i,j),self.fdiag(i,j),self.fadiag(i,j))\n for r in range(4):\n if moves[r] is not None:\n self.actions.append(moves[r])\n winner = self.winner(action)\n if winner is not None:\n rewards = np.array([winner,(-1)*winner])\n else:\n rewards = np.array([0,0])\n self.done = winner is not None\n return self.board.copy(), rewards, self.done, self.turn", "def decide_move(self, game_state):\n # Get all possible moves\n valid_pos = game_state.get_valid_positions(game_state.pacs_pos[self.pac_id], 'pac')\n # Get the value of the expression tree for each possible move.\n # Feed the calculator the values of G, P, W, F, M instead of\n # recalculating those values each time we hit them in the tree.\n valid_pos_vals = [ self.tree.root.calc([game_state.G(pos),\n game_state.P(pos),\n game_state.W(pos),\n game_state.F(pos),\n game_state.M(pos, pac_id = self.pac_id)]) \\\n for pos in valid_pos ]\n # Find the index of the highest-valued move\n new_pos_idx = valid_pos_vals.index(max(valid_pos_vals))\n # Set the next move\n self.next_move = valid_pos[new_pos_idx]", "def _sequence(game_record):\n seq = []\n for item in game_record.get_main_sequence():\n color, move = item.get_move()\n # color == None is entries that are not actual game play\n # move == None is a pass, which in theory we could try to\n # predict, but not yet\n if color is not None and move is not None:\n seq.append((color, move))\n return seq", "def make_move(self, move_to_play, color_to_move, return_capture=False):\r\n captures = 0\r\n if move_to_play == 'PASS':\r\n board_copy = Board(self.state, self.previous_state, self.to_move)\r\n if self.to_move == 1:\r\n board_copy.to_move = 2\r\n else:\r\n board_copy.to_move = 1\r\n if return_capture:\r\n return board_copy, captures\r\n else:\r\n return board_copy\r\n\r\n current_state = np.array(self.state)\r\n ptemp_state = np.array(current_state)\r\n\r\n for p in ORTHOGONAL_POSITIONS[move_to_play]:\r\n if self.board[p[0]][p[1]].chain_liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += len(self.chains[(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color)])\r\n current_state = self.remove_chain(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color,\r\n current_state)\r\n\r\n elif self.board[p[0]][p[1]].liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += 1\r\n current_state[p[0]][p[1]] = 0\r\n\r\n current_state[move_to_play[0]][move_to_play[1]] = color_to_move\r\n if color_to_move == 1:\r\n temp_board = Board(current_state, ptemp_state, 2)\r\n else:\r\n temp_board = Board(current_state, ptemp_state, 1)\r\n if return_capture:\r\n return temp_board, captures\r\n else:\r\n return temp_board", "def get_tiger_capturing_moves(self) -> List[tuple]:\n tuples: List[tuple] = []\n for pos in self.get_all_tiger_positions():\n for landing_pos, goat_pos in pos.piece._get_capturing_positions():\n tuples.append((landing_pos, goat_pos))\n\n return tuples", "def get_all_valid_moves(self, player):\n moves = [] # Stores the possible moves\n capture_move_exists = False # Indicates if a capturing move is possible\n\n for piece in self.get_all_pieces(player):\n valid_moves = self._get_valid_moves(piece)\n\n for move, skip in valid_moves.items():\n moves.append([(piece.row, piece.col), move, skip])\n\n if len(skip) > 0:\n # Checks if there is a move that can capture a piece\n capture_move_exists = True\n\n if capture_move_exists:\n # Only gets the capturing moves if there is 
one\n eating_moves = []\n for move in moves:\n if len(move[2]) != 0:\n eating_moves.append(move)\n\n moves = eating_moves\n\n return moves", "def choose_move(self):\n return 0", "def next(self):\n # first, am i legal? Do I exit the bounds of 3x3x3, or do i intersect an\n # already filled block?\n\n piece_len = pieces[self.index]\n final_pos = tuple(self.pos[i] + (self.direction[i] * piece_len) for i in range(3))\n \n # the only values should be 0,1,2\n if not all(0 <= val <= 2 for val in final_pos):\n return []\n \n # next, lets update fill_state, checking that its not already filled\n for i in range(piece_len):\n self.pos = tuple(self.pos[i] + self.direction[i] for i in range(3))\n # check that the currnt value is 0\n if self.fill_state.get(self.pos):\n return []\n # mark the box as filled\n self.fill_state[self.pos] = True\n \n # And if we made it this far, we know now that the Step is valid\n\n # sanity check that we're where we should be\n assert final_pos == self.pos\n\n # Next we need to find the four directions we can turn\n next_steps = []\n for i in range(len(self.direction)):\n # if we're moving in this direction (pos or neg), we can't turn that way\n if self.direction[i] != 0:\n continue\n for posneg in [1, -1]:\n direction = [0, 0, 0]\n direction[i] = posneg\n next_steps.append(\n Step(self.index+1, final_pos, direction, self.fill_state.copy(), self)\n )\n\n return next_steps", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n while to_move < len(movables):\n # Make the move\n movable_statement = movables[to_move]\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n\n # Find out if this state has already been explored\n visited = False\n for visited_state in self.visited.keys():\n if visited_state.state == new_state:\n visited = True\n\n # If the new state hasn't been visited then add it as a child then move down to this child\n if not visited:\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.currentState = new_gs\n break\n\n # Else skip this state and try going to the next movable statement\n else:\n # print(\"SKIP THIS STATE\")\n self.gm.reverseMove(movable_statement)\n to_move += 1\n\n # Went all the way down to a leaf, backtrack\n if (to_move >= len(movables)):\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n return False", "def op_capture_postconditions(self,oldPieceCoords,newPieceCoords,capturedPieceCoords):\n\n # Start of new state constrution\n next_gs_board = Board.from_binary_matrix(self.board)\n next_gs_board.set_element(newPieceCoords[0], newPieceCoords[1], self.curr_player)\n next_gs_board.remove_element(oldPieceCoords[0], oldPieceCoords[1])\n next_gs_board.remove_element(capturedPieceCoords[0], capturedPieceCoords[1])\n next_gs_next_player 
= self.curr_player\n next_gs_next_move = self.FREE\n next_gs_next_pieces = set()\n\n new_gs = Eximo(next_gs_next_player,next_gs_next_move,next_gs_next_pieces,next_gs_board)\n\n # Check if moved piece has reached opposite side\n if(new_gs.reach_otherside(newPieceCoords)):\n new_gs.board.remove_element(newPieceCoords[0], newPieceCoords[1])\n new_gs.next_move = self.ADDPIECE_2\n new_gs.next_pieces = self.addition_viable_tiles()\n new_gs.perform_checkup()\n\n # Check if the next move must also be a capture by the same player\n elif(new_gs.can_capture(newPieceCoords)):\n new_gs.next_move = self.CAPT\n new_gs.next_pieces = {newPieceCoords}\n else:\n new_gs.curr_player = self.get_enemy(self.curr_player)\n\n # Check if the next_piece checkup needs to be made\n if new_gs.curr_player == self.get_enemy(self.curr_player):\n new_gs.perform_checkup()\n\n new_gs.last_piece = newPieceCoords\n\n return new_gs", "def generate_possible_moves_single_gp(self, game):\n for color in game.paths:\n path = game.paths[color]\n if path.is_complete():\n continue\n gp1, gp2 = path.get_grow_points()\n adj2gp1 = utils.get_adjacent_points(gp1)\n for possible in adj2gp1:\n # print \"Attempting\"\n # print game\n # print possible, path.can_be_added_to_path(possible, 1)\n if path.can_be_added_to_path(possible, 1):\n copy_game = deepcopy(game)\n \"\"\":type: Flow\"\"\"\n copy_game.paths[color].add_to_path(possible, 1)\n self.queue.append(copy_game)\n # print \"after attempt\"\n # print copy_game" ]
[ "0.6862329", "0.65180343", "0.6380384", "0.6203256", "0.6088648", "0.6003288", "0.59560937", "0.5951401", "0.5936796", "0.59350836", "0.5897749", "0.5875835", "0.58558494", "0.58550346", "0.5852458", "0.5844779", "0.58106405", "0.5792629", "0.5773724", "0.57558537", "0.5754133", "0.57363236", "0.57331115", "0.57289684", "0.572805", "0.5727553", "0.5722534", "0.5708691", "0.56920165", "0.5689542" ]
0.6520749
1
Given a capture move, return all long capture moves following this one. We do recursive DFS. We also don't replicate the board, but use the same self.board to avoid replication time.
def find_following_moves(self, capture_move, move_privilege): # Temporarily changing the board, simulating the move and checking if there are more to follow. floating_piece = self.board[capture_move[1]] self.board[capture_move[1]] = EM self.board[capture_move[2]] = self.board[capture_move[0]] self.board[capture_move[0]] = EM next_moves = [(i, j, k) for (i, j, k) in move_privilege[capture_move[2]] if self.board[j][:1] in OPPONENT_COLORS[self.curr_player] and self.board[k][:1] == EM] def return_back_pieces(): # Returning the board to its previous state self.board[capture_move[1]] = floating_piece self.board[capture_move[0]] = self.board[capture_move[2]] self.board[capture_move[2]] = EM if not next_moves: # This was the final move in a series of moves return_back_pieces() return [capture_move] possible_next_moves = [] for next_move in next_moves: for move in self.find_following_moves(next_move, move_privilege): possible_next_moves.append(capture_move + move[1:]) return_back_pieces() return possible_next_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_possible_moves(self):\n possible_capture_moves = self.calc_capture_moves()\n if possible_capture_moves:\n # There is at least one capture move. Let's DFS them!\n self_curr_player = self.curr_player\n next_moves = []\n for capture_move in possible_capture_moves:\n if self.board[capture_move[0]][:1] == SOLDIER_COLOR[self_curr_player]:\n next_moves += self.find_following_moves(capture_move, SOLDIER_CAPTURE_MOVES_FROM[self_curr_player])\n else:\n next_moves += self.find_following_moves(capture_move, OFFICER_CAPTURE_MOVES_FROM)\n\n return next_moves\n\n # There were no capture moves. We return the single moves.\n return self.calc_single_moves()", "def dfsl(board, depth_limit):\n # base cases\n if all(not piece.alive for piece in board.black_pieces):\n # goal! start building a path\n return []\n\n elif depth_limit == 0:\n # no path found to goal with this depth limit\n return None\n\n # recursive case: try all possible moves for all remaining pieces\n remaining_pieces = [p for p in board.white_pieces if p.alive]\n for piece in remaining_pieces:\n for newpos in piece.moves():\n oldpos = piece.pos\n eliminated_pieces = piece.makemove(newpos)\n result = dfsl(board, depth_limit-1)\n piece.undomove(oldpos, eliminated_pieces)\n\n if result is not None:\n # recursively found a sequence of moves to a goal state! hooray!\n # continue building the (reversed) sequence on the way back up\n result.append((piece, newpos))\n return result\n # otherwise, continue searching\n\n # no sequence found using any possible move (with this depth limit)\n return None", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def enumerate_moves(self):\n add_ew = lambda x: [x+'e', x+'w']\n allowed_catches = add_ew(self._directions[0])\n moves = []\n # First add 
the one/two step forward moves\n new_slot = self._board.get_dir(self._current_space, self._directions[0])\n if new_slot and new_slot.is_free():\n moves.append(ChessMove(self._current_space, new_slot))\n if (self._side == BLACK and new_slot.row == self._board.size - 1) or \\\n (self._side == WHITE and new_slot.row == 0):\n moves[-1].add_promotion()\n if (self._side == BLACK and self._current_space.row == 1) or \\\n (self._side == WHITE and self._current_space.row == self._board.size -2):\n new_slot = self._board.get_dir(new_slot, self._directions[0])\n if new_slot and new_slot.is_free():\n moves.append(ChessMove(self._current_space, new_slot))\n\n # Now add all the captures.\n for direction in allowed_catches:\n new_slot = self._board.get_dir(self._current_space, direction)\n if new_slot and new_slot.has_opponent(self._side):\n moves.append(ChessMove(self._current_space, new_slot, [new_slot]))\n if (self._side == BLACK and new_slot.row == self._board.size - 1) or \\\n (self._side == WHITE and new_slot.row == 0):\n moves[-1].add_promotion()\n return moves", "def get_move(self, board, possible_moves):\n next_move = None\n max_score = -float('Inf')\n self.start_time = datetime.now()\n for depth in range(2,3): # iterative deepening\n try:\n for move in possible_moves:\n board_copy = deepcopy(board)\n self.man.play_move(board_copy, move, self.color)\n score = self.minimaxm(depth, board, False)\n if score > max_score:\n max_score = score\n next_move = move\n\n except TimeoutError:\n print(\"ran out of time\")\n break\n return next_move", "def moves(self) -> List[List[PushState]]:\n # seen_moves = set()\n possible_moves = [[], []]\n\n if state := self.prune_states(self.state): # Could return None\n size = len(self.prune_states(state))\n else:\n return possible_moves\n\n for count, each_slot in enumerate(state):\n # for count, each_slot in reversed(list(enumerate(state))):\n if each_slot == \"L\" and not possible_moves[0]:\n next_state = deepcopy(state)\n next_state = tuple(self.push(list(next_state), count))\n next_state = self.prune_states(next_state)\n\n # if next_state not in seen_moves:\n # seen_moves.add(next_state)\n possible_moves[0].append(next_state)\n\n elif each_slot == \"R\" and not possible_moves[1]:\n next_state = deepcopy(state)\n next_state = tuple(self.push(list(next_state), count))\n next_state = self.prune_states(next_state)\n\n # if next_state not in seen_moves:\n # seen_moves.add(next_state)\n possible_moves[1].append(next_state)\n\n if possible_moves[0] and possible_moves[1]:\n break\n\n return possible_moves", "def select_move(self, game_state, return_visit_counts=False):\n \n # Start with a tree consisting of a root node only. The root node\n # is associated with the given board position.\n root = self.create_node(game_state)\n \n # If no legal moves can be made from the given board position, pass \n # the turn. This happens when all of the players pieces are surrounded,\n # if the player has no pieces left or if the game is over. \n if not root.branches:\n if return_visit_counts:\n return Act.pass_turn(), {}\n return Act.pass_turn()\n \n for i in range(self.num_rounds):\n # On each iteration, walk down the tree to a leaf node and select\n # a move to make from the corresponding leaf game state.\n node = root\n next_move = self.select_branch(node)\n while node.has_child(next_move):\n node = node.get_child(next_move)\n next_move = self.select_branch(node)\n \n # Create a new tree node for the selected move and add it to\n # the tree. 
If the leaf node corresponds to a finished game\n # then don't create a new node and assign a value to the node\n # based on who won.\n if node.state.is_not_over():\n if next_move:\n new_state = copy.deepcopy(node.state)\n new_state.take_turn_with_no_checks(Act.play(next_move))\n child_node = self.create_node(new_state, \n move=next_move, parent=node)\n move = next_move\n value = -1 * child_node.value \n else:\n # If the current player can't make any moves from the\n # selected gamestate then next_move will be 'None' meaning\n # the player passes the turn.\n new_state = copy.deepcopy(node.state)\n new_state.take_turn_with_no_checks(Act.pass_turn())\n child_node = self.create_node(new_state, \n move=next_move, parent=node)\n move = next_move\n value = -1 * child_node.value\n else:\n # If the game in the current state is over, then the last\n # player must have won the game. Thus the value/reward for the\n # other player is 1. The current node is not updated with\n # the new reward as no branches can stem from a finished game\n # state.\n move = node.last_move\n node = node.parent\n value = 1\n \n # Update the nodes traversed to get to the leaf node with the \n # new value for the new move.\n while node is not None:\n node.record_visit(move, value)\n move = node.last_move\n node = node.parent\n value *= -1\n \n # Get the visit counts of the branches if they were requested.\n if return_visit_counts:\n visit_counts = {}\n for move in root.branches.keys():\n visit_counts[move] = root.branches[move].visit_count\n \n # Get a list of possible moves sorted according to visit count,\n # the move with the highest visit count should be first in the list.\n moves = [move for move in root.moves()]\n moves = sorted(moves, key=root.visit_count, reverse=True)\n \n # Loop through the sorted moves and return the first legal one.\n for move in moves:\n if not game_state.is_move_illegal(move):\n if return_visit_counts:\n return Act.play(move), visit_counts\n return Act.play(move)\n \n # If no legal move is found then pass the turn.\n if return_visit_counts:\n return Act.pass_turn(), visit_counts\n return Act.pass_turn()", "def find_next_moves(self):\n # iterate through all cells, and group them with upper cells and left\n # cells\n\n # generate separated cells then merge the them with same neighbours\n matrix_rows = len(self.status)\n if matrix_rows == 0:\n matrix_cols = 0\n else:\n matrix_cols = len(self.status[0])\n matrix = []\n for i in range(matrix_rows):\n matrix.append([[(i, j)] for j in range(matrix_cols)])\n # merge coordinations\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if self.status[i][j] != '':\n # is same with right cell?\n if j < matrix_cols - 1 and self.status[i][j] == self.status[i][j + 1]:\n new_item = matrix[i][j] + matrix[i][j + 1]\n matrix[i][j] = matrix[i][j + 1] = new_item\n # is same with down cell?\n if i < matrix_rows - 1 and self.status[i][j] == self.status[i + 1][j]:\n new_item = matrix[i][j] + matrix[i + 1][j]\n matrix[i][j] = matrix[i + 1][j] = new_item\n\n # filter out all unvalid results\n result = []\n # filter out all single-cell groups\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if (len(matrix[i][j]) > 1 and\n matrix[i][j] not in result):\n result.append(matrix[i][j])\n\n # filter sublists\n result = sorted(result, key=len, reverse=True)\n changed = True\n while changed:\n changed = False\n for i in range(len(result)):\n for j in range(i + 1, len(result)):\n if set(result[i]).issuperset(set(result[j])):\n result.remove(result[j])\n 
changed = True\n break\n if changed:\n break\n\n if result:\n for i in result:\n yield (self.convert_coordinations(i),\n len(i) * len(i) * 5,\n self.calc_new_status(i))\n else:\n left_cells = sum([len(i) - i.count('') for i in self.status])\n left_cells_score = 2000 - 20 * left_cells * left_cells\n if left_cells_score < 0:\n left_cells_score = 0\n for i in self.parents:\n i.children[self] = [(i.children[self][0][0] + left_cells_score,\n i.children[self][0][1],\n i.children[self][0][2])]", "def find_possible_moves(self, board, self_color):\r\n possible_moves = []\r\n delta = [(0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1)]\r\n\r\n for r in range(len(board)):\r\n for c in range(len(board[r])):\r\n if board[r][c] == self_color:\r\n for i in range(0, 8):\r\n coords = (r, c)\r\n\r\n found_move = self.check_moves(board, self_color, coords, delta[i])\r\n\r\n if found_move is not None and found_move not in possible_moves:\r\n possible_moves.append(found_move)\r\n return possible_moves", "def make_move(self, move_to_play, color_to_move, return_capture=False):\r\n captures = 0\r\n if move_to_play == 'PASS':\r\n board_copy = Board(self.state, self.previous_state, self.to_move)\r\n if self.to_move == 1:\r\n board_copy.to_move = 2\r\n else:\r\n board_copy.to_move = 1\r\n if return_capture:\r\n return board_copy, captures\r\n else:\r\n return board_copy\r\n\r\n current_state = np.array(self.state)\r\n ptemp_state = np.array(current_state)\r\n\r\n for p in ORTHOGONAL_POSITIONS[move_to_play]:\r\n if self.board[p[0]][p[1]].chain_liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += len(self.chains[(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color)])\r\n current_state = self.remove_chain(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color,\r\n current_state)\r\n\r\n elif self.board[p[0]][p[1]].liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += 1\r\n current_state[p[0]][p[1]] = 0\r\n\r\n current_state[move_to_play[0]][move_to_play[1]] = color_to_move\r\n if color_to_move == 1:\r\n temp_board = Board(current_state, ptemp_state, 2)\r\n else:\r\n temp_board = Board(current_state, ptemp_state, 1)\r\n if return_capture:\r\n return temp_board, captures\r\n else:\r\n return temp_board", "def calc_capture_moves(self):\n capture_soldier_moves = [(i, j, k) for (i, j, k) in SOLDIER_CAPTURE_MOVES[self.curr_player]\n if self.board[i][:1] == SOLDIER_COLOR[self.curr_player]\n and self.board[j][:1] in OPPONENT_COLORS[self.curr_player]\n and self.board[k][:1] == EM]\n capture_officer_moves = [(i, j, k) for (i, j, k) in OFFICER_CAPTURE_MOVES\n if self.board[i][:1] == OFFICER_COLOR[self.curr_player]\n and self.board[j][:1] in OPPONENT_COLORS[self.curr_player]\n and self.board[k][:1] == EM]\n return capture_soldier_moves + capture_officer_moves", "def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves", "def get_rook_moves(self, i, j, moves):\r\n # the rook can move in 4 directions\r\n directions = ((-1, 0), (0, -1), (1, 0), (0, 1))\r\n\r\n # player's turn are important de determine what pieces they can 
capture\r\n if self.turn_white:\r\n oponent_colour = 'b'\r\n else:\r\n oponent_colour = 'w'\r\n\r\n # all 4 directions are covered as long as the fields are empty\r\n for d in directions:\r\n for m in range(1, 8):\r\n cri = i + d[0] * m\r\n crj = j + d[1] * m\r\n if 0 <= cri <= 7 and 0 <= crj <= 7: # check if the table ends\r\n # the landing square mai be empty\r\n if self.board[cri][crj] == '--':\r\n moves.append(Move((i, j), (cri, crj), self.board))\r\n # oponent_colour piece that may be captured\r\n elif self.board[cri][crj][0] == oponent_colour:\r\n moves.append(Move((i, j), (cri, crj), self.board))\r\n break\r\n # own piece\r\n else:\r\n break\r\n else:\r\n break", "def possible_moves(self, piece):\n def _index(orig, off):\n \"\"\"Helper function to find the new index.\"\"\"\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)\n\n p_x, p_y = piece\n p_i = _index(piece, (0, 0))\n\n # pass a list of the four corners first for basic possibles\n move_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for i in [-1, 1] for j in [-1, 1]]\n possibles = self.squares[p_i].can_move(piece, move_land)\n\n # next append the new list from jumps\n jump_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for j in [-2, 2] for i in [-2, 2]]\n possibles += self.squares[p_i].can_jump(piece, move_land, jump_land)\n\n # clean out the list of duplicates, although there should be none\n return [m for i, m in enumerate(possibles) if m not in possibles[:i]]", "def traverseRook(self):\n\t\tmoves = np.empty(14, dtype=object)\n\t\tcnt = [0]\n\t\tPiece.traverse(self, cnt, moves, -1, 0)\n\t\tPiece.traverse(self, cnt, moves, 1, 0)\n\t\tPiece.traverse(self, cnt, moves, 0, -1)\n\t\tPiece.traverse(self, cnt, moves, 0, 1)\n\t\treturn moves[:cnt[0]]", "def get_all_valid_moves(self, player):\n moves = [] # Stores the possible moves\n capture_move_exists = False # Indicates if a capturing move is possible\n\n for piece in self.get_all_pieces(player):\n valid_moves = self._get_valid_moves(piece)\n\n for move, skip in valid_moves.items():\n moves.append([(piece.row, piece.col), move, skip])\n\n if len(skip) > 0:\n # Checks if there is a move that can capture a piece\n capture_move_exists = True\n\n if capture_move_exists:\n # Only gets the capturing moves if there is one\n eating_moves = []\n for move in moves:\n if len(move[2]) != 0:\n eating_moves.append(move)\n\n moves = eating_moves\n\n return moves", "def get_valid_moves(self, id:int=0) -> List[B_path]:\n\t\t# could use numpy to make the code more efficient\n\t\t# HERE\n\t\trt = [copy.deepcopy(B_path(id)) for x in repeat(None, MAX_INT)]\n\t\t# 15 elements\n\t\t# print(rt)\n\t\tnonzero=np.where(self.board != 0)[0]\n\t\t# print(nonzero)\n\t\tfor key, select in enumerate(nonzero):\n\t\t\t# print(\"-- select --\")\n\t\t\t# print(select)\n\t\t\tif self.board[select] > 1:\n\t\t\t\t# print(\"tile appears more than twice. 
adding paired\")\n\t\t\t\trt[0].paths.append(Path(np.byte(select), np.byte(select))) # in c, it would be presented as a single byte\n\t\t\t# print(\"-- target --\")\n\t\t\tfor target in nonzero[key+1:]:\n\t\t\t\t# print(target)\n\t\t\t\trt[target-select].paths.append(Path(np.byte(select), np.byte(target)))\n\t\treturn rt", "def find_next_move(board: Type[Board], turn: int, colour) -> Union[Tuple[int, int], Tuple[Tuple[int, int], Tuple[int, int]]]:\n # for this version, restart search for each turn\n # not sure if we can persist the tree\n root_node = _new_tree_node(board, turn-1, get_opponent_colour(colour))\n print(\"root_node already has \", root_node.winning, '/', root_node.visited) # DEBUG\n \n start_time = datetime.datetime.now()\n elapsed = datetime.timedelta(0)\n simulation_rounds = 0\n while (elapsed <= config.MC_TIME_LIMIT):\n promising_node, path = select(root_node)\n node_to_explore = promising_node\n\n if promising_node.board.get_status(is_placing=promising_node.current_turn <= 24) \\\n == BoardStatus.ON_GOING:\n children_nodes = _get_children_nodes(node_to_explore)\n if (len(children_nodes) > 0):\n action, node_to_explore = random.choice(children_nodes)\n path.append(node_to_explore)\n\n playout_result = random_playout(node_to_explore)\n\n back_prop(path, playout_result)\n\n elapsed = datetime.datetime.now() - start_time\n simulation_rounds += 1\n\n print(f\"\\n\\n\\n[MC] {simulation_rounds} rounds of simulation run.\\n\\n\\n\")\n winning_action, winning_node = _get_child_with_max_score(root_node)\n return winning_action", "def get_move(self, board):\n color = 1\n interval = [-math.inf, math.inf]\n if board.count(color) + board.count(-1 * color) < 6:\n self.step_count = 0\n self.step_count += 2 \n if self.step_count < 45:\n _, move = self._max(board, color, 0, *interval)\n else:\n _, move = self._max(board, color, -2, *interval)\n return move", "def getMove(self, grid):\n\t\tmove = self.performIterativeDepthSearch(grid)\n\t\tendtime = time.clock()\n\t\t#print (endtime - starttime)\n\t\treturn move", "def _get_possible_moves(board, lightcycle):\n result = []\n for diff in ((0, 1, PlayerActions.MOVE_DOWN), (1, 0, PlayerActions.MOVE_RIGHT), (0, -1, PlayerActions.MOVE_UP), (-1, 0, PlayerActions.MOVE_LEFT)):\n next_x = lightcycle['position'][0] + diff[0]\n next_y = lightcycle['position'][1] + diff[1]\n if 0 <= next_x < len(board) and 0 <= next_y < len(board[0]):\n if board[next_x][next_y] in (EMPTY, POWERUP):\n result += [diff]\n return result", "def moves(self):\n\n # define a full range, which we can compare against columns,\n # rows, or blocks. 
they're all the same when stored as sets.\n line = set(range(1, 10))\n moves = []\n\n # iterate every cell on the board\n for row in range(0, 9):\n for col in range(0, 9):\n\n # ignore this cell if it's already filled\n i = self._index(col, row)\n if self.data[i] is not None:\n continue\n\n # fetch the adjacent cells\n row_values = set(self._row(row))\n col_values = set(self._column(col))\n bck_values = set(self._block(col, row))\n\n # subtract the values present in the adjacent cells\n # (since this cell *can't* be of any of those values),\n # to leave the list of possibilities for this cell\n missing = line.difference(row_values, col_values, bck_values)\n\n # if there's only *one* possibility, we've found the\n # solution to this cell\n if len(missing) == 1:\n moves.append((col, row, missing.pop()))\n\n return moves", "def get_next_moves2(self):\n moves = []\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\" and self.move_makes_box(i, j):\n next_board = copy.deepcopy(self.board)\n next_board[i][j] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n next_turn = get_opponent(self.turn)\n moves.append(DotsAndBoxesState(next_board, next_turn))\n return moves", "def get_moves_for_square(self, square):\n (x,y) = square\n\n # determine the color of the piece.\n color = self[x][y]\n\n # skip empty source squares.\n if color==0:\n return []\n\n # search all possible directions.\n moves = []\n for direction in self.__directions:\n move = self._discover_move(square, direction)\n if move:\n # print(square,move,direction)\n moves.append(move)\n\n # return the generated move list\n return moves", "def move_replay(self) -> Generator[List[List[str]], None, None]:\n current_moves = OrderedDict()\n for move in self.moves:\n current_moves[move] = self.moves[move]\n yield self.compile_board(current_moves)", "def get_possible_next_boards(self, selected_piece_coords):\n moves = []\n piece = self.get_piece(selected_piece_coords)\n next_move_coords = self.get_possible_next_coords(selected_piece_coords,piece.player)\n for next_move in next_move_coords:\n board_copy = self.clone()\n board_copy.move_piece(selected_piece_coords, next_move) \n moves.append(board_copy)\n return moves", "def moves(self):\n\n moves = list()\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n move = (row, col)\n\n if self.board[row][col] == 9:\n moves.append(move)\n\n if self.board[row][col] == 1 or self.board[row][col] == 2:\n\n move = (row - 1, col)\n\n if self.board[row - 1][col] == 1 or self.board[row - 1][col] == 2:\n\n pass\n\n else:\n\n moves.append(move)\n\n return moves", "def capture(board,player,possible_moves, pos, loc, i):\r\n \r\n if player == 1 and encode(loc+i) in board.black:\r\n next_piece = encode(loc+i) \r\n new_pos = int(loc + (i*2)) \r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc,next_piece])\r\n return True\r\n \r\n if player == -1 and encode(loc+i) in board.white: \r\n next_piece = encode(loc+i) \r\n new_pos = int(loc + (i*2))\r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in 
board.empty):\r\n possible_moves.append([pos,new_enc,next_piece])\r\n return True\r\n \r\n return False", "def ids(board):\n depth_limit = 0\n while True:\n result = dfsl(board, depth_limit)\n if result is not None:\n return list(reversed(result))\n depth_limit += 1", "def get_possible_moves(self, board):\n possible_moves = []\n\n # search in each direction for possible squares to move to\n for direction in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n possible_moves.extend(\n self._get_possible_moves_in_dir(board, rank_incr=direction[0], file_incr=direction[1])\n )\n\n return possible_moves" ]
[ "0.68712467", "0.6634379", "0.6587093", "0.61561346", "0.6102893", "0.60196084", "0.5792323", "0.57847136", "0.57644325", "0.5760115", "0.57349503", "0.5699928", "0.56808186", "0.56797403", "0.5657029", "0.56525606", "0.5646975", "0.5603376", "0.5562328", "0.5545172", "0.5540988", "0.5537533", "0.55374324", "0.55249053", "0.5524826", "0.55137753", "0.55105096", "0.5486727", "0.5451028", "0.54386854" ]
0.7262479
0
Tests whether the static method is_heap correctly verifies that a list of elements preserves the heap property.
def test_static_is_heap(self): good = [4, 4, 8, 9, 4, 12, 9, 11, 13] bad = [1,2,3,114,5,6,7,8,9,10] self.assertTrue(Heap.is_heap(good), 'should hold the heap property') self.assertFalse(Heap.is_heap(bad), 'should not hold the heap property')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heapify(x):\n pass", "def build_heap(self, alist):\r\n if len(alist) > self.capacity:\r\n return False\r\n else:\r\n i = len(alist) // 2\r\n self.size = len(alist)\r\n self.items = [0] + alist[:] + [None]*(self.capacity+1-len(alist))\r\n while (i > 0):\r\n self.perc_down(i)\r\n i = i - 1\r\n return True", "def heap_sort(list):\n pass", "def test_heap_sort(self):\n integers = heap_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def example_eight():\n a = []\n heapq.heappush(a, 5)\n heapq.heappush(a, 3)\n heapq.heappush(a, 7)\n heapq.heappush(a, 4)\n\n assert a[0] == heapq.nsmallest(1, a)[0] == 3\n\n print('Before:', a)\n a.sort()\n print('After: ', a)", "def test_native(self):\n import heapq\n X = []\n heapq.heappush(X, Item('A', 5))\n heapq.heappush(X, Item('B', 5))\n heapq.heappush(X, Item('C', 5))\n heapq.heappush(X, Item('D', 5))\n heapq.heappush(X, Item('E', 5))\n heapq.heappush(X, Item('F', 5))\n item_ordered = []\n while X:\n item_ordered.append(heapq.heappop(X).val)\n self.assertEqual(['A', 'C', 'F', 'E', 'B', 'D'], item_ordered)\n\n X = []\n heapq.heappush(X, TimeSpecifiedItem('A', 5))\n heapq.heappush(X, TimeSpecifiedItem('B', 5))\n heapq.heappush(X, TimeSpecifiedItem('C', 5))\n heapq.heappush(X, TimeSpecifiedItem('D', 5))\n heapq.heappush(X, TimeSpecifiedItem('E', 5))\n heapq.heappush(X, TimeSpecifiedItem('F', 5))\n ordered = []\n while X:\n ordered.append(heapq.heappop(X).val)\n self.assertEqual(['A', 'B', 'C', 'D', 'E', 'F'], ordered)", "def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]", "def test_remove(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n h.remove(2)\n\n self.assertTrue(Heap.is_heap(data), 'should preserve heap property')\n self.assertNotIn(8, h.data, 'the value corresponding to the index was removed')", "def test_insert(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n\n h.insert(7)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(10)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(5)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')", "def test_pop(self):\n self.assertRaises(EmptyHeapException, self.minheap.pop)\n self.minheap.heap = [0, 1, 4, 7, 9]\n assert self.minheap.pop() == 1\n assert self.minheap.heap == [0, 4, 9, 7]", "def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def construct_max_heap(self, lst):\n self.heap_list = lst\n #start compare node\n node = (len(self.heap_list)-2)/2\n while node >= 0:\n self.sift_down(node, len(self.heap_list)-1)\n node -= 1", "def is_min_heap(x):\n n = len(x)\n idx = jnp.arange(1, n, dtype=int)\n parents = (idx-1) // 2\n return jnp.all(x[parents] <= x[1:])", "def build_heap(arr):\n for i in range(len(arr)-1, -1, -1):\n down_heapify(arr, len(arr), i)", "def example_seven():\n a = []\n heapq.heappush(a, 5)\n heapq.heappush(a, 3)\n heapq.heappush(a, 7)\n heapq.heappush(a, 4)\n\n print(heapq.heappop(a), heapq.heappop(a), 
heapq.heappop(a), heapq.heappop(a))", "def test_pop_decreases_size(sample_priorityq):\n for i in range(5):\n sample_priorityq.insert([i, i + 3])\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 4\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 3\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 2", "def build_max_heap(self, list_to_be_heap):\n self.heaplist = self.heaplist + list_to_be_heap\n self.currentsize = len(list_to_be_heap)\n\n # as it follow properties of complete binary tree, non leaf nodes will end to total size / 2\n index = self.currentsize // 2\n\n # > 0 : to ignore first element of the array which is 0..\n while index > 0:\n self.shift_item_down(index)\n index -= 1", "def is_max_heap(x):\n n = len(x)\n idx = jnp.arange(1, n, dtype=int)\n parents = (idx-1) // 2\n return jnp.all(x[parents] >= x[1:])", "def testArbitraryItems(self):\n hd = HeapDict(size=2)\n item1 = self.PriorityItem(1.0, [None, 'Arbitrary item'])\n item2 = self.PriorityItem(2.0, {'Another item'})\n item3 = self.PriorityItem(3.0, (1, 'Third item'))\n item4 = self.PriorityItem(4.0, 0)\n hd.push(1, item1)\n hd.push(1, item3)\n hd.push(1, item2)\n hd.push(1, item4)\n self.assertEqual(hd.get_result(), {1: [item4, item3]})", "def build_heap(arr):\n for i in range((len(arr)//2), -1, -1):\n heapify(arr,index=i, size=len(arr)-1)", "def test_insert_increases_size(sample_priorityq):\n assert len(sample_priorityq.heap_list) == 0\n sample_priorityq.insert([5, 1])\n assert len(sample_priorityq.heap_list) == 1\n sample_priorityq.insert([6, 2])\n assert len(sample_priorityq.heap_list) == 2", "def test_func_heap(self):\n cmd = \"deref $_heap()\"\n target = _target(\"heap\")\n self.assertFailIfInactiveSession(gdb_run_cmd(cmd, target=target))\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n if is_64b():\n self.assertIn(\"+0x0048:\", res)\n else:\n self.assertIn(\"+0x0024:\", res)\n\n cmd = \"deref $_heap(0x10+0x10)\"\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n if is_64b():\n self.assertIn(\"+0x0048:\", res)\n else:\n self.assertIn(\"+0x0024:\", res)", "def heapify(self, not_a_heap, show_details=False):\n self._heap = not_a_heap[:]\n cur_idx = len(self._heap) // 2 - 1\n while cur_idx >= 0:\n self._perc_down(cur_idx)\n cur_idx = cur_idx - 1\n if show_details:\n print(self._heap)", "def manage_heap(heap, coordinates, distance):\n\tif distance > SUN_DISTANCE:\n\t\tif len(heap) < k:\n\t\t\theap.append((distance, coordinates))\n\t\t\tif len(heap) == k:\n\t\t\t\theapq._heapify_max(heap)\n\t\telif distance < heap[0][0]:\n\t\t\theapq._heappushpop_max(heap, (distance, coordinates))", "def build_heap(data):\n n = len(data) # elements 0 .. 
n-1\n swaps = []\n def swap(i, j):\n t = data[i]\n data[i] = data[j]\n data[j] = t\n swaps.append((i,j))\n def sift_down(i):\n # 3-way comparison to restore heap property to i\n new_i = i\n l = left(i); r = right(i)\n if l < n and data[l] < data[new_i]: new_i = l\n if r < n and data[r] < data[new_i]: new_i = r\n if not i == new_i:\n # i did not satsify heap property, swap and carry on down\n swap(i, new_i)\n sift_down(new_i)\n # starting from end, parent of n-1 is first that may break heap condition\n for i in range(parent(n - 1), -1, -1):\n sift_down(i)\n return swaps", "def build_heap(self, arr):\n i = len(arr) // 2\n self.size = len(arr)\n self.heap_list = [-1] + arr[:]\n while i > 0:\n self.percolate_down(i)\n i = i - 1", "def testMaxSize(self):\n hd = HeapDict(size=2)\n hd.push('a', 1)\n hd.push('a', 2)\n hd.push('a', 3)\n hd.push('b', 3)\n hd.push('b', 2)\n hd.push('b', 1)\n # The order is always descending.\n self.assertEqual(hd.get_result(), {'a': [3, 2], 'b': [3, 2]})", "def heappop(heap):\n pass", "def heap_sort(alist: list, key=None) -> list:\n newList = List()\n hp = BinaryHeap(func=key)\n\n for item in alist:\n hp.heappush(item)\n\n for _ in range(len(alist)):\n newList.append(hp.heappop())\n\n return newList" ]
[ "0.69610363", "0.69486296", "0.6804351", "0.66430354", "0.6605916", "0.6603965", "0.6579628", "0.6459191", "0.63457316", "0.633452", "0.6319004", "0.6319004", "0.6296606", "0.6261532", "0.6209209", "0.62068355", "0.61780304", "0.6161776", "0.61614645", "0.61578655", "0.61445665", "0.61056066", "0.60617745", "0.6050837", "0.6036127", "0.6033182", "0.6014531", "0.6009737", "0.6002658", "0.5966042" ]
0.78172654
0
Test the removal of a key from the middle of the heap.
def test_remove(self): data = [4, 4, 8, 9, 4, 12, 9, 11, 13] h = Heap(data) h.remove(2) self.assertTrue(Heap.is_heap(data), 'should preserve heap property') self.assertNotIn(8, h.data, 'the value corresponding to the index was removed')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __delitem__(self, key):\n\t\ttry:\n\t\t\tdel self.heap[[item == key for _, item in self.heap].index(True)]\n\t\texcept ValueError:\n\t\t\traise KeyError(str(key) + \" is not in the priority queue\")\n\t\theapq.heapify(self.heap)", "def remove(self, key):\n index = self._hash_mod(key)\n node = self.storage[index]\n node_before = None\n if node:\n while node:\n if node.key == key:\n if node_before:\n node_before.next = node.next\n elif node.next:\n self.storage[index] = node.next\n else:\n self.storage[index] = None\n self.key_count -= 1\n return\n node_before = node\n node = node.next\n print(f\"An element with key '{key}' cannot be found!\")", "def remove(self, key: int) -> None:\n pos = key % self.space\n head = self.hash_table[pos]\n curr = head\n\n while curr.next:\n if curr.next.key == key:\n curr.next = curr.next.next\n return\n curr = curr.next", "def remove(self, key):\r\n\r\n\t\t# if the key doesn't exist, exit the function\r\n\t\tif not self.contains_key(key):\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tindex = self.get_index(key) # get the index of the key\r\n\t\t\tlinked_list = self._buckets[index] # now get the entire linked list\r\n\t\t\tlinked_list.remove(key) # call the remove function from the linked list\r\n\t\t\tself.size -= 1 # subtract 1\r", "def remove(self, key: int) -> None:\n pos = self.hash(key)\n\n if key in self.table[pos]:\n del self.table[pos][key]", "def remove(self, key: str) -> bool:\n prev, cur = None, self.head\n while cur is not None:\n if cur.key == key:\n if prev:\n prev.next = cur.next\n else:\n self.head = cur.next\n self.size -= 1\n return True\n prev, cur = cur, cur.next\n return False", "def remove(self, key: str) -> bool:\n prev, cur = None, self.head\n while cur is not None:\n if cur.key == key:\n if prev:\n prev.next = cur.next\n else:\n self.head = cur.next\n self.size -= 1\n return True\n prev, cur = cur, cur.next\n return False", "def remove(self, key: int) -> None:\n sh = key % 37\n if self.map[sh] == None:\n return\n for i in range(len(self.map[sh])):\n kv = self.map[sh][i]\n if kv[0] == key:\n self.map[sh].remove(kv)\n return", "def remove(self, key):", "def delete(self, key):\n # Your code here\n idx = self.hash_index(key)\n\n if idx >= 0 and idx < self.capacity:\n # find the entry node that matches the provided key\n prev_node = None\n curr_node = self.hash_table[idx]\n match = None\n\n # check if any node at index exists\n if curr_node is None:\n print(f'Key {key} was not found')\n return\n else:\n # loop until we find a match for the provided key\n while match is None:\n # check to see if there is an entry at this index whose key matches the provided key\n if curr_node.key != key:\n prev_node = curr_node\n curr_node = curr_node.next\n \n elif curr_node.key == key:\n match = curr_node\n \n # if we've reached the tail and still haven't found a match\n if curr_node.next is None and match is None:\n print(f'Key {key} was not found')\n return \n \n # if prev_node is still None and match.next is None, that means there is only 1 node at this index\n if prev_node is None and match.next is None:\n self.hash_table[idx] = None\n elif prev_node is not None:\n prev_node.next = match.next\n \n self.total_items-= 1\n \n else:\n print(f'Key {key} was not found')\n return", "def remove(self, key: int) -> None:\n idx = key % self.size\n if self.mp[idx]:\n for i in range(len(self.mp[idx])):\n if self.mp[idx][i][0] == key:\n #self.mp[idx].pop(i)\n del self.mp[idx][i]\n break", "def remove(self, key):\n if key < self.length:\n self.buckets[key] = -1", 
"def remove(self, key):\n\n index = self._get_hash(key)\n\n if self.table[index] is not None:\n for i, pair in enumerate(self.table[index]):\n if key == pair[0]:\n del self.table[index][i]\n self.size -= 1\n return\n\n raise ValueError(f\"can't find value with given key {key}\")", "def remove(self, key: int) -> None:\n \n \n hashvalue=key% 1000\n if self.hashset[hashvalue]==None:\n return\n head = self.hashset[hashvalue]\n dummy_head = Node(0)\n curr = dummy_head\n while head:\n k,v = head.data\n if k==key:\n head=head.next\n curr.next=head\n curr= curr.next\n if head != None:\n \n head = head.next\n \n self.hashset[hashvalue]=dummy_head.next", "def remove(self, key: int) -> None:\n t = key % 20011\n delete = []\n for item in self.hash[t]:\n if item[0] == key:\n delete = item\n if delete:\n self.hash[t].remove(delete)", "def remove(self, key: str) -> None:\n thekey = self._gethash(key)\n if self.HashMap[thekey] is not None:\n if len(self.HashMap[thekey]) == 2:\n self.HashMap[\n self._gethash(key)\n ] = None # Keep the location but set the value to None\n else:\n hashkey = self._gethash(key)\n idx = self._find_if_hashclash(key, hashkey, \"i\")\n self.HashMap[hashkey].pop(idx)\n self.HashMap[hashkey].pop(idx)\n self.length -= 1", "def remove(self, key):\n ha = self.myhash(key)\n if key in self.hashmap[ha][0]:\n i = self.hashmap[ha][0].index(key)\n self.hashmap[ha][0].pop(i)\n self.hashmap[ha][1].pop(i)", "def remove(self, key: int) -> None:\n hashKey = key % 1000\n prev = node = self.bucket[hashKey]\n if not node: return\n if node.pair[0] == key:\n self.bucket[hashKey] = node.next\n else:\n node = node.next\n while node:\n if node.pair[0] == key:\n prev.next = node.next\n break\n else:\n prev, node = prev.next, node.next", "def __delitem__(self, key):\n if self._size > 1:\n node_to_delete = self._getItemHelper(key, self._root)\n if node_to_delete:\n self._delItemHelper(node_to_delete)\n self._size -= 1\n else:\n raise KeyError('Key is not in the tree.')\n elif self._size == 1 and self._root.key == key:\n self._root = None\n self._size -= 1\n else:\n raise KeyError('Key is not in the tree.')", "def remove(self, key):\n i = key //1000\n j = key%1000\n self.container[i][j] = -1", "def remove(self, key: int) -> None:\n index = key % 10000\n previous = self.array[index]\n current = previous.next\n while current:\n if current.key == key:\n previous.next = current.next\n break\n previous = previous.next\n current = current.next", "def delete(self, key):\r\n node = self.root\r\n while not node.is_leaf():\r\n index = node.search(key)\r\n\r\n if node.contains_key_at(key, index):\r\n left, right = node.children[index : index+2]\r\n\r\n if left.num_keys() > self.min_num_keys:\r\n node.keys[index] = node.deep_predecessor(index)\r\n (node, key) = (left, node.keys[index])\r\n\r\n elif right.num_keys() > self.min_num_keys:\r\n node.keys[index] = node.deep_successor(index) \r\n (node, key) = (right, node.keys[index])\r\n\r\n else:\r\n node = node.merge_children(index)\r\n\r\n else:\r\n child = node.children[index]\r\n if child.num_keys() <= self.min_num_keys:\r\n child = node.grow_child(index, self.min_num_keys)\r\n node = child\r\n \r\n node.delete(key)", "def remove(self, key):\n hashv = self.hash(key)\n bucket=self.hashmap[hashv]\n for i,(k,v) in enumerate(bucket):\n if k==key:\n del bucket[i]", "def delete(self, key):\n visitor = VisitorDelete()\n\n self.visit(key, visitor)\n\n if (visitor.result):\n self.size -= 1\n\n return visitor.result", "def _del(self, key: int) -> int:\n node = 
self.htab.pop(key)\n node.prev.next = node.next\n node.next.prev = node.prev\n return node.val", "def delete(self, key):", "def remove(self, key):\n index = key % self.size\n curr_node = prev_node = self.hash_table[index]\n\n # Removing from empty bin just return\n if not curr_node:\n return\n\n if curr_node.key == key:\n # We found the node to delete immediately, we can now skip over it\n self.hash_table[index] = curr_node.next\n else:\n # We did not find the node to delete we must now traverse the bin\n curr_node = curr_node.next\n\n while curr_node:\n if curr_node.key == key:\n prev_node.next = curr_node.next\n break\n else:\n prev_node, curr_node = prev_node.next, curr_node.next", "def remove_key(self, key: keyType) -> None:\n self.validate_key(key)\n hash_address = self.get_hash_address(key)\n head_node = self.hashTable[hash_address]\n if key not in head_node.keys:\n raise Exception\n\n for index in range(len(head_node.singlyLinkedList)):\n if head_node.singlyLinkedList[index].key == key:\n head_node.count -= len(head_node.singlyLinkedList[index].values)\n head_node.keys.remove(key)\n head_node.singlyLinkedList.pop(index)\n break\n logger.info(\"Successfully remove the element.\")", "def delete(self, key):\n # TODO: Find the given key and delete its entry if found\n self.size -= 1\n hash_key = self._bucket_index(key) # Gets the index of the key\n if self.contains(key) is True:\n for key_value_pair in self.buckets[hash_key]: # Iteratre through the value pair\n if key_value_pair[0] is key: # If the key matches\n self.buckets[hash_key].delete(key_value_pair)\n # self.buckets[hash_key] = None\n return True\n else:\n raise KeyError(\"Key no longer exists\") # If key doesn't exist, return None", "def __delitem__(self, key):\n\n # If key is in hash map\n if self.__contains__(key):\n\n # Get hashed key\n i = self.hash(key)\n\n # Get chain index of key value pair\n chain_idx = self.keys_ref[i].index(key)\n\n # Delete value associated with key in hash map\n del self.table[i][chain_idx]\n\n # Delete key from hash table\n del self.keys_ref[i][chain_idx]\n\n # Remove key from set of keys\n self.keys_set.remove(key)\n\n # Decrement size\n self.size -= 1\n\n # If key not in hash map\n else:\n\n # Raise error\n raise KeyError(key)" ]
[ "0.71460545", "0.6887034", "0.6840105", "0.6774187", "0.6762826", "0.67459995", "0.67459995", "0.6740352", "0.6685591", "0.66796625", "0.66610855", "0.663803", "0.66340905", "0.6587309", "0.65832776", "0.65391386", "0.65278965", "0.65011847", "0.64858145", "0.64825714", "0.6461989", "0.64616376", "0.64596087", "0.6440579", "0.6434516", "0.6381343", "0.6368895", "0.63573253", "0.6352443", "0.6340801" ]
0.71519136
0
Test if extracting min and adding a new value at the same time works.
def test_extract_min_and_insert(self): data = [4, 5, 8, 9, 6, 12, 9, 11, 13] h = Heap(data) min_value = h.extract_min_and_insert(2) self.assertEqual(min_value, 4, 'should return the min value') expected = [2, 5, 8, 9, 6, 12, 9, 11, 13] self.assertEqual(h.data, expected, 'should remove the old min and '+ 'add new value correctly')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mini(a, b):\n return min(a, b)", "def min(x):\n pass", "def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)", "def test_check_min(self):\n\t\tself.filter.set_operator(\".min\")\n\t\tself.filter.set_limit(12)\n\t\tself.assertTrue(self.filter.check(Object(field=12)))\n\t\tself.assertTrue(self.filter.check(Object(field=15)))\n\t\tself.assertFalse(self.filter.check(Object(field=9)))", "def _get_min_positive_value(self, a, b):\n if a < 0 and b >= 0:\n return b\n if a >= 0 and b < 0:\n return a\n return min(a, b)", "def test_minimum_all_same(self):\n\n temp_data = [(3.00, time.localtime()), (3.00, time.localtime()),\n (3.00, time.localtime()), (3.00, time.localtime())]\n\n tt = TemperatureTracker()\n result = tt.minimum_from(temp_data)\n self.assertEqual(result[0], 3.0)\n self.assertEqual(temp_data[3][1], result[1])", "def test_minimum_all_different(self):\n temp_data = [(1.00, time.localtime()), (2.00, time.localtime()),\n (3.00, time.localtime()), (4.00, time.localtime())]\n\n tt = TemperatureTracker()\n result = tt.minimum_from(temp_data)\n self.assertEqual(result[0], 1.0)\n self.assertEqual(temp_data[0][1], result[1])", "def set_min(self, min):\n self.set_val((min, self.val[1]))", "def action_store_min(raw_val):\n\n if isinstance(raw_val, list):\n values = []\n for val in raw_val:\n val = auto_type_convert(val)\n if isinstance(val, (int, float)):\n values.append(val)\n if len(values) != 0:\n return min(values)\n else:\n return None\n else:\n return None", "def minimum(x, y):\r\n # see decorator for function body\r", "def min(self, other):\n ox = self._op_check(other)\n r = self.dec_value.min(ox)\n if r == self.dec_value:\n return self\n else:\n return other", "def mini(a,b):\n\tif a < b: \n\t\treturn a\n\treturn b", "def test_find_smallest_element(self):\n smallestValue = min(self.values)\n valueFound = self.tree.findSmallest(self.tree.root)\n self.assertEqual(smallestValue, valueFound)", "def test_extract_min(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n\n min_key = h.extract_min()\n self.assertEqual(min_key, 4, 'should extract the min value')\n self.assertTrue(Heap.is_heap(data), 'should still hold the heap property')\n\n min_key = h.extract_min()\n self.assertEqual(min_key, 4, 'should extract the min value')\n self.assertTrue(Heap.is_heap(data), 'should still hold the heap property')\n\n min_key = h.extract_min()\n self.assertEqual(min_key, 4, 'should extract the min value')\n self.assertTrue(Heap.is_heap(data), 'should still hold the heap property')", "def testMin(self):\n\n n = randint(50, 170)\n l = []\n for i in xrange(n):\n a = randint(-2147483648,2147483647)\n self.s.insert(a, a)\n l.append(a)\n\n self.assertIsNotNone(self.s.minNode(self.s._root))\n self.assertEqual(min(l), self.s.minNode(self.s._root).value)", "def exceeds_min(value, min_):\n\n if isinstance(value, (float, int)):\n val_ = value\n else:\n try:\n val_ = int(value)\n except:\n val_ = value\n if isinstance(min_, (float, int)):\n return (val_ < min_)\n else:\n if min_.isalnum():\n try:\n imin = int(min_)\n return (val_ < imin)\n except:\n pass\n \n return False", "def minimum_inplace(a, b):", "def min(self):\n raise NotImplementedError('must be implemented by subclass')", "def min(self):\n raise NotImplementedError('must be implemented by subclass')", "def min():\n return KeeperOfMinOrMax(int.__gt__)", "def minimum(self, start, end):\n return 
self.foldl1(start, end, min)", "def min(self, other):\n ox, ctx = self._op_check(other)\n r = self.dec.min(ox)\n if r == self.dec:\n return self\n else:\n return other", "def test_value_min(self):\n self.assertEqual(DPTValue1Ucount().to_knx(0), (0x00,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x00,)), 0)", "def Min(data):\n return data.min()", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def test_validate_min_value(self):\n\n test_values = [\n -5,\n 2,\n ]\n\n testrow = TestSchema()\n\n for value in test_values:\n testrow.int_min_field = value\n self.assertRaises(Exception, testrow.save)", "def min(self, min):\n\n self._min = min", "def min(self, min):\n\n self._min = min", "def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)" ]
[ "0.6581571", "0.6492888", "0.64508563", "0.6287588", "0.621782", "0.6203606", "0.6171011", "0.6134213", "0.6038028", "0.6012827", "0.60017294", "0.5988239", "0.5976465", "0.5966177", "0.5962323", "0.59480584", "0.594802", "0.58188283", "0.58188283", "0.5814531", "0.5797752", "0.57913494", "0.57894385", "0.57529706", "0.5731596", "0.57089806", "0.57005644", "0.57005644", "0.57001424", "0.56900734" ]
0.68474996
0
Updates the loop body graph with a subgraph (for body or condition functions)
def update_body_graph(body_graph: Graph, subgraph_proto: dict, body_parameter_names: list, body_results: list): # create a map from a node name in original model to a name in a loop body graph assuming # that names in the original model are unique # initially, the map contains names for parameters that are common for the body and condition graphs map_original_name = {} for idx, pb_node in enumerate(subgraph_proto['input_arg']): map_original_name[pb_node.name] = body_parameter_names[idx] # walk through all nodes (non-parameter and non-result nodes) and add into the loop body graph for pb_node in subgraph_proto['node_def']: # create an NX node id = body_graph.unique_id(pb_node.name) map_original_name[pb_node.name] = id body_graph.add_node(id, pb=pb_node, kind='op') if hasattr(body_graph, 'op_names_statistic') and hasattr(pb_node, 'op'): body_graph.op_names_statistic[pb_node.op] += 1 # add incoming edges based on data_nodes_map for dst_port, inp in enumerate(pb_node.input): orig_src_id = inp.split(":")[0] # TODO: avoid this temporal workaround for TF 2.4 or higher RNN layers: # skip control flow dependency if orig_src_id[0] == '^': continue src_id = map_original_name[orig_src_id] src_port = 0 if len(inp.split(":")) == 1 else int(inp.split(":")[-1]) assert (body_graph.has_node(src_id)) body_graph.add_edges_from([create_tf_edge(src_id + ":" + str(src_port), id, dst_port)]) # create Result nodes in the loop body graph for output in subgraph_proto['output_arg']: output_name = subgraph_proto['ret'][output.name] orig_src_id = output_name.split(":")[0] src_id = map_original_name[orig_src_id] src_port = 0 if len(output_name.split(":")) == 1\ else int(output_name.split(":")[-1]) assert body_graph.has_node(src_id), 'The body graph does not contain output with name "{}"'.format( src_id) body_results.append(Node(body_graph, add_opoutput(body_graph, src_id, src_port, False)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_edges(subgraph, graph_name, bb):\n top_subgraph = get_top_parent(subgraph, graph_name)\n edges = extract_edges(top_subgraph)\n for edge in edges:\n if(edge.get_style() is not None):\n style = edge.get_style()\n if(edge.get_color() is not None):\n color = edge.get_color()\n if(edge.get_label() is not None):\n label = edge.get_label()\n node_head = edge.get_source()\n node_tail = edge.get_destination()\n bb_head = get_bb(node_head)\n bb_tail = get_bb(node_tail)\n if(bb_head >= bb or bb_tail > bb):\n top_subgraph.del_edge(node_head, node_tail, 0)\n if bb_head >= bb:\n if bb_tail > bb:\n add_edge(top_subgraph, update_edge_node_name(node_head, bb_head), update_edge_node_name\n (node_tail, bb_tail), style=style, color=color, label=label)\n else:\n add_edge(top_subgraph, update_edge_node_name(node_head, bb_head), node_tail, style=style, \n color=color, label=label)\n else:\n add_edge(top_subgraph, node_head, update_edge_node_name(node_tail, bb_tail), \n style=style, color=color, label=label)\n \n #si bb_n < bb et bb_s <= bb on touche pas\n #sinon\n # si bb_n >= bb:\n # si bb_s >= bb:\n # creer edge (n+1, s+1)\n # sinon:\n # creer edge (n+1, s)\n # sinon:\n # si bb_s > bb:\n # creer edge (n, s+1)", "def _visit_loop_body(self, node, if_block=None, is_for=None):\n loop_name = \"for\" if is_for else \"while\"\n if if_block:\n node.if_block = if_block\n else:\n node.if_block = self.flow.nextblock(label=\"%s_body\" % loop_name,\n pos=node.body[0])\n self.visitlist(node.body)\n self.flow.loops.pop()\n\n if self.flow.block:\n # Add back-edge\n self.flow.block.add_child(node.cond_block)\n\n # Else clause\n if node.orelse:\n node.else_block = self.flow.nextblock(\n parent=node.cond_block,\n label=\"else_clause_%s\" % loop_name,\n pos=node.orelse[0])\n self.visitlist(node.orelse)\n if self.flow.block:\n self.flow.block.add_child(node.exit_block)\n else:\n node.cond_block.add_child(node.exit_block)\n\n self.exit_block(node.exit_block, node)", "def update(self):\r\n self.g = self.create_graph()", "def sub_graph_merging(self):", "def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)", "def add_subgraph(self, subgraph, prefix):\n for ntype in subgraph.get_types():\n new_nodes = subgraph.get_node_list(ntype)\n for node in new_nodes:\n node.ext_id = node_id(prefix, node.ext_id)\n self.add_node_list(ntype, new_nodes)", "def update_DLoop(self, astnode, path, pathidx):\n pathidx = self.update_until_STOP(astnode['_cond'], path, pathidx+1)\n if pathidx > 0:\n self.update_until_STOP(astnode['_body'], path, pathidx+1)", "def make_looped(self) -> None:\n self.most_right.right_node = self.most_left\n self.most_left.left_node = self.most_right", "def rewrite(self, dag: saldag.OpDag):\n ordered = dag.top_sort()\n if self.reverse:\n ordered = ordered[::-1]\n\n for node in ordered:\n print(type(self).__name__, \"rewriting\", node.out_rel.name)\n if isinstance(node, saldag.Aggregate):\n self._rewrite_aggregate(node)\n elif isinstance(node, saldag.Divide):\n self._rewrite_divide(node)\n elif isinstance(node, saldag.Project):\n self._rewrite_project(node)\n elif isinstance(node, saldag.Filter):\n self._rewrite_filter(node)\n elif isinstance(node, saldag.Multiply):\n self._rewrite_multiply(node)\n elif isinstance(node, saldag.RevealJoin):\n self._rewrite_reveal_join(node)\n elif isinstance(node, saldag.HybridJoin):\n self._rewrite_hybrid_join(node)\n elif isinstance(node, saldag.Join):\n self._rewrite_join(node)\n elif isinstance(node, saldag.Concat):\n self._rewrite_concat(node)\n elif 
isinstance(node, saldag.Close):\n self._rewrite_close(node)\n elif isinstance(node, saldag.Open):\n self._rewrite_open(node)\n elif isinstance(node, saldag.Create):\n self._rewrite_create(node)\n elif isinstance(node, saldag.Distinct):\n self._rewrite_distinct(node)\n else:\n msg = \"Unknown class \" + type(node).__name__\n raise Exception(msg)", "def update_graph(graph):\n\n if not isinstance(graph, WeightedGraph):\n raise TypeError('update_graph(graph): graph must be a WeightedGraph object')\n\n # check if graph has been already updated\n if graph.updated >= 1:\n return\n else:\n graph.updated = 1\n\n # update every vertice of the graph\n for vertice in graph.Vertices:\n update_adj_list(vertice)", "def subgraph(self, nodes, relabel_nodes=False, output_device=None):\n raise NotImplementedError(\"subgraph is not implemented yet\")", "def modify_body(lines, PE_dims, var_map): \n loop_bodies = []\n # Locate the user statements\n for line_id in range(len(lines)):\n line = lines[line_id]\n if line.find('hls_pipeline') != -1:\n # extract the loop body\n body_start = line_id\n r_minus_l = -1\n nxt_line_id = line_id + 1 \n while nxt_line_id < len(lines):\n nxt_line = lines[nxt_line_id]\n if nxt_line.find('}') != -1:\n r_minus_l += 1\n if nxt_line.find('{') != -1:\n r_minus_l -= 1\n if r_minus_l == 0:\n body_end = nxt_line_id - 1\n break\n nxt_line_id += 1\n loop_body = lines[body_start : body_end + 1]\n #print(loop_body)\n loop_bodies.append({'pos': [body_start, body_end], 'lines': loop_body})\n \n # Modidy the loop bodies\n #for body in loop_bodies:\n body_offset = 0\n for idx in range(len(loop_bodies)):\n body = loop_bodies[idx]\n body_lines = body['lines'] \n group_names = []\n has_data_trans = True\n data_trans_info = extract_data_trans_info(body_lines, PE_dims)\n # Remove the in transfer\n while has_data_trans:\n has_data_trans = False\n for line_id in range(len(body_lines)):\n line = body_lines[line_id]\n if line.find('read_channel_intel') != -1:\n has_data_trans = True\n # Locate the read block and the write block\n block_start, block_end = locate_data_trans_block(line_id, body_lines)\n m = re.search(r'\\((.+?)\\)', line) \n fifo_name = m.group(1)\n group_name = fifo_name.split('_')[1]\n group_names.append(group_name)\n break\n if has_data_trans:\n body_lines = body_lines[:block_start] + body_lines[block_end + 1:]\n # Remove the out transfer\n has_data_trans = True\n while has_data_trans:\n has_data_trans = False\n for line_id in range(len(body_lines)):\n line = body_lines[line_id]\n if line.find('write_channel_intel') != -1:\n m = re.search(r'\\((.+?)\\)', line)\n fifo_name = m.group(1).split(',')[0]\n group_name = fifo_name.split('_')[1]\n if group_name in group_names:\n has_data_trans = True\n block_start, block_end = locate_data_trans_block(line_id, body_lines)\n if has_data_trans:\n body_lines = body_lines[:block_start] + body_lines[block_end + 1:]\n #print(body_lines)\n # Wrap the body with space loops\n for dim_idx in range(len(PE_dims)):\n dim = PE_dims[dim_idx] \n line = f'#pragma unroll\\nfor (int s{dim_idx} = 0; s{dim_idx} < {dim}; s{dim_idx}++) {{\\n'\n body_lines.insert(dim_idx, line) \n for dim in PE_dims:\n body_lines.append('}\\n')\n\n # Modify the index\n body_lines = modify_index(body_lines, var_map, PE_dims)\n #print(body_lines)\n\n # Insert the data transfer stmts\n body_lines = insert_data_trans(body_lines, data_trans_info, PE_dims)\n #loop_bodies[idx]['lines'] = body_lines\n\n # Replace the loop bodies\n body_pos = body['pos'] \n lines = lines[: body_offset + 
body_pos[0]] \\\n + body_lines \\\n + lines[body_offset + body_pos[1] + 1 :] \n body_offset += len(body_lines) - (body_pos[1] - body_pos[0] + 1)\n\n return lines", "def refine(self, graph, tick, known_inputs):\n parentpath =_dataflow_path(graph, tick)\n in_connections = self._in_connections(graph, tick)\n out_connections = self._out_connections(graph, tick)\n\n # remove task \n self._remove_task(graph, tick)\n\n # insert the subgraph\n insert_subgraph(graph, self.body_graph, tick)\n for source, dest in in_connections:\n graph.connect(source, dest) \n for source, dest in out_connections:\n graph.connect(source, dest)\n \n # adjust path elements in the tasks of the subgraph\n addedticks=_filter_for_common_parent(graph, tick)\n for t in addedticks:\n _extend_dataflow_path(graph, t, parentpath)\n return defer.succeed(None)", "def updateGraph(self):\n self.initUnits()\n v = self.units.copy()\n v_old = v.copy() * 100 # initial value so it will skip the first break\n for step in range(self.numCycles): # for total number of cycles\n # keep the old version of v for paralel updating\n # if v_old and v every element differnce < 0.001, then stop\n if np.all(np.abs(v_old - v) < 0.001):\n break\n # assign to v_old v from the previous step\n v_old = v.copy()\n for i in range(self.graph.n): # for every unit in the graph\n if i not in self.graph.observed: # if the unit is not a special fixed value s\n net = np.dot(v_old, self.graph.c[i]) # compute total flow to the unit\n if net > 0:\n gradient = net*(self.min_max[1]-v_old[i])\n else:\n gradient = net*(v_old[i]-self.min_max[0])\n v[i] = v_old[i]*(1-self.decay) + gradient\n # should this be after every unit update, or after the whole graph updates ??\n v = np.where(v>1, self.min_max[1], v)\n v = np.where(v<-1,self.min_max[0],v)\n self.units = v", "def schedule_update_all(graph, message_func, reduce_func, apply_func, outframe=...): # -> None:\n ...", "def _update_attributes(self,circuit_graph,name,lib_name,lib_graph, Gsub):\n #PnR can route primitive power but not subckt power\n if lib_name in self.all_lef:\n pg = []\n else:\n pg = self.pg\n G1 = circuit_graph\n num = len([key for key in Gsub\n if 'net' not in G1.nodes[key][\"inst_type\"]])\n # Define ports for subblock\n matched_ports = {}\n ports_weight = {}\n G2 = lib_graph.copy()\n for g1_n, g2_n in Gsub.items():\n if 'net' in G2.nodes[g2_n][\"inst_type\"]:\n if 'external' in G2.nodes[g2_n][\"net_type\"]:\n if num > 1 and g1_n in pg:\n # remove power connections\n G2=nx.relabel_nodes(G2,{g2_n:g1_n},copy=False)\n else:\n matched_ports[g2_n] = g1_n\n ports_weight[g2_n] = []\n for nbr in list(G2.neighbors(g2_n)):\n ports_weight[g2_n].append(G2.get_edge_data(g2_n, nbr)['weight'])\n else:\n G2.nodes[g2_n]['values'] = G1.nodes[g1_n]['values']\n G2.nodes[g2_n]['real_inst_type'] = G1.nodes[g1_n]['real_inst_type']\n return matched_ports,ports_weight,G2", "def loops(graph = None):\n\tunknown_structs = []\n\tcompound_structs = []\n\tloops_dict = create_components_dict()\n\tfor subgraph in nx.connected_component_subgraphs(graph):\n\t\tif subgraph.number_of_nodes() < 3:\n\t\t\tunknown_structs.append(subgraph)\n\t\telse:\n\t\t\tif connectivity_threshold(graph = subgraph) > 2 or loop_type(graph= subgraph) == 'NA':\n\t\t\t\tcompound_structs.append(subgraph)\n\t\t\telse:\n\t\t\t\tloops_dict[loop_type(graph= subgraph)].append(subgraph)\n\treturn loops_dict", "def subgraph(self, name):\n\n self._compiled = None\n sub = Graph()\n self.graph[name] = sub\n return sub", "def populate_graph(self):", "def multiloop_type(graph = 
None):\n\tg = graph.copy()\n\tbreakpoint_nodes = []\n\tfor edge in g.edges(data=True):\n\t\tid1 ,id2 , data = edge\n\t\tif g.edge[id1][id2]['label']== '$':\n\t\t\tif not id1 in breakpoint_nodes:\n\t\t\t\tbreakpoint_nodes.append(id1)\n\t\t\tif not id2 in breakpoint_nodes:\n\t\t\t\tbreakpoint_nodes.append(id2)\n\tfor node in breakpoint_nodes:\n\t\tg.remove_node(node)\n\tif nx.number_connected_components(g) ==1:\n\t\treturn 'bulge'\n\telse:\n\t\treturn 'internal_loop'", "def _update_graph(self, graph, iter_map, box_map, prefix=\"\"):\n # Go through nnil nodes\n for node in graph.available_nodes():\n\n # Deal with ibox only\n box_name = node.name\n if isinstance(node.meta, Ibox) and box_name not in iter_map:\n\n # Construct the itarative graphs\n itergraphs = node.meta.itergraphs(box_name)\n if itergraphs == {}:\n raise ValueError(\"IBox '{0}' can't be executed.\".format(\n box_name))\n\n # Update the input graph and the execution list\n iter_map[box_name] = []\n iterboxes = []\n for itername, iteritem in itergraphs.items():\n itergraph, iterbox = iteritem\n graph.add_graph(itergraph)\n _, iteration = itername.split(Ibox.itersep)\n iteration = int(iteration)\n iterboxes.append((iteration, iterbox))\n iter_map[box_name].extend(\n [node.name for node in itergraph._nodes.values()])\n iterboxes = sorted(iterboxes, key=lambda item: item[0])\n box_map[box_name] = [item[1] for item in iterboxes]", "def FeynmanSubgraphs(graph, model):\n model.SetTypes(graph)\n model.checktadpoles = False\n graph.FindSubgraphs(model)\n\n subs_toremove = subgraphs.DetectSauseges(graph._subgraphs)\n graph.RemoveSubgaphs(subs_toremove)\n\n subgraphs.RemoveTadpoles(graph)", "def update_graph(self):\n if self.update_callback:\n self.update_callback()", "def subgraph(infr, aids):\n orig_name_labels = list(infr.gen_node_values('orig_name_label', aids))\n infr2 = AnnotInference(\n infr.ibs, aids, orig_name_labels, autoinit=False, verbose=infr.verbose\n )\n # deep copy the graph structure\n infr2.graph = infr.graph.subgraph(aids).copy()\n infr2.readonly = True\n infr2.verifiers = infr.verifiers\n infr2.ranker = infr.ranker\n\n infr.params = copy.deepcopy(infr.params)\n infr2._viz_image_config = infr._viz_image_config.copy()\n\n # infr2._viz_init_nodes = infr._viz_image_config\n # infr2._viz_image_config_dirty = infr._viz_image_config_dirty\n infr2.edge_truth = {\n e: infr.edge_truth[e] for e in infr2.graph.edges() if e in infr.edge_truth\n }\n\n # TODO: internal/external feedback\n\n infr2.nid_counter = infr.nid_counter\n infr2.dirty = True\n infr2.cm_list = None\n infr2.qreq_ = None\n\n # TODO:\n # infr2.nid_to_errors {} # = copy.deepcopy(infr.nid_to_errors)\n # infr2.recover_graph = copy.deepcopy(infr.recover_graph)\n # infr2.pos_redun_nids = copy.deepcopy(infr.pos_redun_nids)\n # infr2.neg_redun_metagraph = copy.deepcopy(infr.neg_redun_metagraph)\n\n infr2.review_graphs = {}\n for k, g in infr.review_graphs.items():\n if g is None:\n infr2.review_graphs[k] = None\n elif k == POSTV:\n infr2.review_graphs[k] = g.subgraph(aids, dynamic=True)\n else:\n infr2.review_graphs[k] = g.subgraph(aids)\n return infr2", "def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self", "def add_body(self,body):\n if body.id == 0:\n body.update_id(len(self.bodies)+1)\n else:\n body_ids = [b.id for b in self.bodies]\n if body.id in body_ids:\n print(\"Error: specified body id has already been assigned\")\n 
print(\"Assigning a new body id\")\n body.update_id(max(body_ids)+1)\n\n body.group_master = True\n for by in self.bodies:\n if by.group == body.group:\n body.group_master = False\n break\n\n for scenario in self.scenarios:\n for func in scenario.functions:\n body.add_function_derivatives()\n\n self.bodies.append(body)", "def update_gl_state(self, *args, **kwargs):\n for v in self._subvisuals:\n v.update_gl_state(*args, **kwargs)", "def update(self, interval):\n for o in self.physics_objects:\n o.update(interval)\n for f in self.interacting_forces:\n f.update(interval)\n for p in self.particles:\n p.update(interval)", "def rdf_update_connections(rdf, prop, obj, subj, owl):\n conname = prop.split('#')[-1]\n print(\"createcon \"+str(obj)+ \" \" + str(subj))\n obj.relationships.create(conname, subj)\n for i in rdf.objects(subject=prop, predicate=RDFS.subPropertyOf):\n print(i)\n rdf_update_connections(rdf, i, obj, subj, owl)\n for i in rdf.objects(subject=prop, predicate=owl.inverseOf):\n conname = i.split('#')[-1]\n subj.relationships.create(conname, obj)", "def _fill_graph_score(graph: GraphDatabase, session: Session) -> None:\n _LOGGER.info(\"Computing graph score for each package\")\n\n subgraphs = deque()\n\n # The very first walk will mark down libraries that do not have any dependencies.\n for package_name in graph.get_python_package_version_names_all(distinct=True):\n dependencies = graph.get_depends_on_package_names(package_name)\n subgraphs.append(SubGraphEntity(subgraph_name=package_name, to_visit=set(dependencies)))\n if not dependencies:\n entry = session.query(Package).filter(Package.package_name == package_name).first()\n if not entry:\n # Might be ingesting in the mean time, do not mark down and continue.\n continue\n\n entry.subgraph_size = entry.version_count\n session.commit()\n else:\n subgraphs.append(SubGraphEntity(subgraph_name=package_name, to_visit=set(dependencies)))\n\n while subgraphs:\n subgraph = subgraphs.popleft()\n\n for package_name in subgraph.to_visit:\n entry = session.query(Package).filter(Package.package_name == package_name).first()\n if not entry:\n _LOGGER.warning(\"Cannot score subgraph %r as not all the dependencies were resolved\", package_name)\n break\n\n if entry.subgraph_size is None:\n # Scheduling for the next round.\n subgraphs.append(subgraph)\n break\n\n subgraph.subgraph_size *= entry.subgraph_size * entry.version_count\n subgraph.subgraphs_seen.add(package_name)\n else:\n entry = session.query(Package).filter(Package.package_name == subgraph.subgraph_name).first()\n if not entry:\n _LOGGER.error(\"No subgraph for %r found, this looks like a programming error\")\n continue\n\n entry.subgraph_size = subgraph.subgraph_size\n session.commit()\n\n subgraph.to_visit -= subgraph.subgraphs_seen" ]
[ "0.6166921", "0.5917972", "0.572015", "0.56074136", "0.5480895", "0.54787594", "0.54676795", "0.5410596", "0.5361217", "0.5360984", "0.5354818", "0.53209776", "0.5262267", "0.52560925", "0.5198067", "0.51964974", "0.5164367", "0.51537746", "0.5147732", "0.51466846", "0.514208", "0.51387864", "0.51332384", "0.51194817", "0.50900084", "0.50750184", "0.5068871", "0.5065314", "0.50620824", "0.503944" ]
0.7227099
0
Check to see if we should apply thresholding to preprocess the image.
def preprocess(img): #if "thresh" in args["preprocess"]: image = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] """ make a check to see if median blurring should be done to remove noise """ #if "blur" in args["preprocess"]: #image = cv2.medianBlur(gray, 3) return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classify(self, img, threshold=2.5):\n return (img-self.mu)/numpy.sqrt(self.sigmasqr) > threshold", "def global_threshold(img, threshold_method):\n pass", "def thresh_setup():\n pass", "def apply_thresholding(x):\n return x > threshold_otsu(x)", "def preprocess_image(self, image, g, c, m, debug=False):\r\n\r\n img = self.adjust_gamma(image, g)\r\n\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n pil_im = Image.fromarray(img)\r\n contrast = ImageEnhance.Contrast(pil_im)\r\n contrast = contrast.enhance(c)\r\n img = np.array(contrast)\r\n\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n img = cv2.GaussianBlur(img, (5, 5), 1)\r\n #mean = np.mean(img[::2] ** m)\r\n\r\n\r\n retval, thresh = cv2.threshold(img, m, 255, cv2.THRESH_BINARY)\r\n\r\n if debug:\r\n cv2.namedWindow('Grey', cv2.WINDOW_NORMAL)\r\n cv2.imshow(\"Grey\", img)\r\n cv2.resizeWindow('Grey', 200, 200)\r\n cv2.moveWindow(\"Grey\", 800, 0)\r\n\r\n cv2.namedWindow('Thresh', cv2.WINDOW_NORMAL)\r\n cv2.imshow(\"Thresh\", thresh)\r\n cv2.resizeWindow('Thresh', 200, 200)\r\n cv2.moveWindow(\"Thresh\", 800, 200)\r\n\r\n return thresh", "def procces_image(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n ret, processed_image = cv2.threshold(image, 75, 255, cv2.THRESH_BINARY_INV)\n return processed_image", "def autothreshold(gray_im, method=\"otsu\"):\n if method == \"otsu\":\n t = otsu(gray_im)\n elif method == \"kmeans\":\n t = ave(kmeans(list(gray_im.getdata())))\n return gray_im.point(lambda x: 0 if x < t else 255) # < or <= ?", "def image_thresholding(image: np.ndarray):\n #  Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n # Find threshold using Otsu filter\n threshold: float = filters.threshold_otsu(image)\n binary = image > threshold\n\n binary_image = np.where(image, binary, 0) * 255\n\n #  Resize the iamge back to a shape of (2304, )\n return image_as_array(image)", "def oldThresh(self, image):\r\n\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n blur = cv2.GaussianBlur(gray, (5, 5), 1)\r\n mean = np.mean(blur )\r\n if(mean > 255):\r\n mean=255\r\n retval, thresh = cv2.threshold(blur, mean, 255, cv2.THRESH_BINARY)\r\n\r\n return thresh", "def preparing_tocut(image):\n\n _, image = threshold_image(image)\n\n return image", "def test_unknown_thresholding(self):\n self.cube.coord(var_name=\"threshold\").attributes[\n \"spp__relative_to_threshold\"\n ] = \"between\"\n msg = \"Probabilities to percentiles only implemented for\"\n with self.assertRaisesRegex(NotImplementedError, msg):\n Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)", "def image_preprocessing(image):\n\treturn cv2.GaussianBlur(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (5,5), 0)", "def test_thresholded_image(self):\n orig_size = self._image.size\n self._api.SetImage(self._image)\n image = self._api.GetThresholdedImage()\n self.assertIsNot(image, None)\n self.assertIsInstance(image, Image.Image)\n self.assertEqual(image.size, orig_size)\n self.assertEqual(self._api.GetThresholdedImageScaleFactor(), 1)", "def infer_threshold(self, x: np.ndarray, fpr: float) -> None:\n self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)", "def process(self):\n self.output_image = cv.adaptiveThreshold(\n self.input_image,\n # self.MIN_THRESHOLD,\n self.MAX_PIXEL_VALUE,\n cv.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv.THRESH_BINARY_INV,\n self.BLOCK_SIZE,\n self.CONSTANT,\n )\n return self.output_image", "def threshold_image(img, threshold=THRESHOLD):\n return cv2.threshold(img, threshold, MAX_GRAY_SCALE, 
cv2.THRESH_BINARY)", "def threshold(self):\n self.frame = cv.adaptiveThreshold(self.frame, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, self.thresh, 2)", "def anoise(this, *args, **kargs):\n\t\t\n\t\t# Arguments\n\t\tif not args: args = [50]\n\t\t\n\t\t# Kernel's retrieval\n\t\tanoisek = this._ANOISEK\n\t\tif anoisek is None: return None\n\t\t\n\t\t# More magic\n\t\tbin = this._BINARY\n\t\tfor thresh in args:\n\t\t\tbin[:,:] = (cv2.filter2D(bin, -1, anoisek) / 2.55 > thresh) * 255\n\t\treturn True", "def pipeline_image_threshold_filter(img, plot=False):\n combined_thresholds = combined_threshold(img, plot)\n remove_shadow = de_shadow_lab(img)\n orange_lane = orange_lane_detection_hsv(img)\n white_lane = white_lane_detection_luv(img)\n white_lane_2 = white_line_detection_hls(img)\n combined = np.zeros_like(combined_thresholds)\n\n # only add image information if less then 80% are white pixel\n white_pixel_white_thresh = 0.8 * combined.shape[0] * combined.shape[1]\n if np.sum(orange_lane == 1) < white_pixel_white_thresh:\n combined[(orange_lane == 1) & (combined_thresholds == 1)] = 1\n if np.sum(white_lane == 1) < white_pixel_white_thresh:\n combined[(white_lane == 1) & (combined_thresholds == 1)] = 1\n if np.sum(white_lane_2 == 1) < white_pixel_white_thresh:\n combined[(white_lane_2 == 1) & (combined_thresholds == 1)] = 1\n combined[remove_shadow == 1] = 1\n\n if plot:\n plot_pipe_results(img, combined_thresholds, remove_shadow, orange_lane, white_lane, white_lane_2, combined)\n return combined", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def calculate_thresholds(self):\n \n for group in self.roi_groups:\n for roi in group.rois:\n for image in range(len(roi.counts)):\n # print(roi.autothreshs)\n # print('image',image)\n if roi.autothreshs[image]:\n values = np.fromiter(roi.counts[image].values(), dtype=float)\n roi.thresholds[image] = self.calculate_threshold(values)\n\n for image, im_copy in enumerate(self.copy_im_threshs): # copy values from a different image and set to manual thresh if needed\n if im_copy is not None:\n for group in self.roi_groups:\n for roi in group.rois:\n roi.autothreshs[image] = False\n roi.thresholds[image] = roi.thresholds[im_copy]", "def preprocess(self):\n snr_mask = self._snr_preprocessing()\n flux_mask = self._flux_preprocessing()\n masking_mask = self._mask_preprocessing()\n return snr_mask & flux_mask & masking_mask", "def reprocessImage(self, imgdata):\n\t\t# check to see if image is rejected by other criteria\n\t\tif self.rejectImage(imgdata) is False:\n\t\t\treturn False\n\t\t# check CTF parameters for image and skip if criteria is not met\n\t\tif self.checkCtfParams(imgdata) is False:\n\t\t\treturn False\n\t\treturn None", "def detect(img, template):\r\n\r\n #detect threshold\r\n args = parse_args()\r\n threshold=dictornary(args)\r\n\r\n # detect edges of image\r\n \"\"\"prewitt_x = [[1, 0, -1]] * 3\r\n prewitt_y = [[1] * 3, [0] * 3, [-1] * 3]\r\n img_x = task1.detect_edges(img, prewitt_x, False)\r\n img_y = task1.detect_edges(img, prewitt_y, False)\r\n img_norm = task1.edge_magnitude(img_x, img_y)\r\n\r\n task1.write_image(task1.normalize(img_norm), \".//img_norm.jpg\")\r\n\r\n # detect edges in template\r\n\r\n temp_x = task1.detect_edges(template, prewitt_x, False)\r\n temp_y = task1.detect_edges(template, prewitt_y, False)\r\n template_norm = task1.edge_magnitude(temp_x, temp_y)\r\n\r\n 
task1.write_image(task1.normalize(template_norm), \".//template_norm.jpg\") \"\"\"\r\n\r\n img_norm = task1.normalize(img)\r\n template_norm = task1.normalize(template)\r\n\r\n coordinates = []\r\n temp_h = len(template_norm)\r\n temp_w = len(template_norm[0])\r\n\r\n rows = len(img_norm)\r\n cols = len(img_norm[0])\r\n\r\n output = [[0 for x in range(len(img_norm[0]))] for y in range(len(img_norm))]\r\n cropped_img = [[0 for x in range(temp_w)] for y in range(temp_h)]\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n\r\n if ((i +temp_h) < rows and (j + temp_w < cols)):\r\n cropped_img = utils.crop(img_norm, i, i + temp_h, j, j + temp_w)\r\n\r\n\r\n img_mul_temp = utils.elementwise_mul(cropped_img, template_norm)\r\n sum = 0\r\n # sum of every elemnet in img_mul_temp\r\n for p in range(temp_h):\r\n for q in range(temp_w):\r\n sum += img_mul_temp[p][q]\r\n\r\n # squaring every element in denominator of image\r\n square_img = utils.elementwise_mul(cropped_img, cropped_img)\r\n numsum_img = 0\r\n for d in range(len(cropped_img)):\r\n for e in range(len(cropped_img[0])):\r\n numsum_img += square_img[d][e]\r\n\r\n # squaring every element in denominator of template\r\n square_temp = utils.elementwise_mul(template_norm, template_norm)\r\n numsum_temp = 0\r\n for k in range(temp_h):\r\n for l in range(temp_w):\r\n numsum_temp += square_temp[k][l]\r\n\r\n denominator = np.sqrt((numsum_img * numsum_temp))\r\n\r\n if (denominator != 0):\r\n output[i][j] = (sum / denominator)\r\n if (output[i][j] > threshold):\r\n coordinates.append([i, j])\r\n\r\n # TODO: implement this function.\r\n # raise NotImplementedError\r\n return coordinates", "def _preprocessing(image) -> np.ndarray:\n # TODO: Turn mapping into generic function.\n processed_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n processed_image[~mask] = 255\n return processed_image", "def imthresh(img):\n img_vecs = img.flatten()\n\n # pre-calculate the histogram and cumulative histogram.\n vbins = np.arange(0, 257, 1)\n img_hist, hist_edges = np.histogram(img_vecs, vbins)\n vbins = (hist_edges[:-1] + hist_edges[1:])/2\n \n hist_times_gray = np.cumsum(img_hist * np.arange(0, 256, 1))\n cum_hist = np.cumsum(img_hist)\n\n # A first approximation of the background mean mean_1 is the mean of the corner pixels.\n # The third corner's index seems to be wrong!\n m, n = img.shape\n sum_bg = np.sum(img_vecs[[0, n - 1, n * (m - 1), m * n - 1]])\n num_pix_bg = 4\n mean1 = sum_bg/4\n mean2 = (np.sum(img_vecs) - sum_bg)/(m *n - num_pix_bg)\n threshold_val = np.uint8(np.ceil((mean1 + mean2)/2))\n\n\n if (threshold_val != 0) and (cum_hist[threshold_val - 1] == 0):\n threshold_val_old = threshold_val\n\n threshold_val_old = 0 # weird\n while threshold_val != threshold_val_old:\n threshold_val_old = threshold_val\n mean1 = hist_times_gray[threshold_val - 1]/cum_hist[threshold_val - 1]\n mean2 = (hist_times_gray[-1] - hist_times_gray[threshold_val - 1])/(cum_hist[-1] - cum_hist[threshold_val - 1])\n\n threshold_val = np.uint8(np.ceil((mean1 + mean2)/2))\n\n\n img_out = img >= threshold_val\n return img_out, threshold_val", "def _check_consistency_between_imaging_extractors(self):\n return True", "def thresholdInput(self,samples):\n self.__thresholdInput(samples)", "def preprocess(self, img):\n return img - np.mean(img)", "def apply_threshold(heatmap, threshold):\n heatmap_thresh = np.copy(heatmap)\n ind = np.where(np.logical_and(heatmap_thresh>1, heatmap_thresh<=threshold))\n heatmap_thresh[ind] = 0\n #heatmap_thresh[(heatmap_thresh <= threshold)] = 0\n 
return heatmap_thresh" ]
[ "0.6902314", "0.6696663", "0.6598084", "0.62699246", "0.6235991", "0.6163094", "0.60365695", "0.6001765", "0.59255934", "0.59180653", "0.58960956", "0.5891709", "0.5859457", "0.58538556", "0.5842548", "0.5827132", "0.57804036", "0.5778117", "0.56903183", "0.5687083", "0.56831056", "0.5681099", "0.5678767", "0.567835", "0.56602925", "0.56568885", "0.56546223", "0.56544095", "0.56476945", "0.5627368" ]
0.6849483
1