query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
metadata: dict
negatives: list (30 items per row)
negative_scores: list (30 items per row)
document_score: string (length 4 to 10)
document_rank: string (2 classes)
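The fields above only give names, types, and length ranges; the metadata block repeated in every record declares a single triplet objective over (query, document, negatives). As a hedged sketch of how a row with this layout could be consumed, the snippet below expands one row dict into (query, positive, negative) training triplets. The field names come from the column list above; the toy row and the way rows are loaded are assumptions for illustration only.

# Minimal sketch: turn one row of this schema into (query, positive, negative)
# training triplets, matching the triplet objective declared in the metadata
# blocks below. The toy row is an illustrative assumption, not an actual record.

def row_to_triplets(row):
    """Yield one (query, positive, negative) tuple per hard negative."""
    query = row["query"]        # natural-language description
    positive = row["document"]  # the matching code snippet
    for negative in row["negatives"]:
        yield query, positive, negative

toy_row = {
    "query": "Checks that the PNG directories exist; if not, it creates them",
    "document": "def check_png_directories(self): ...",
    "negatives": ["def _check_dirs(self): ...", "def create_directory(): ..."],
    "negative_scores": ["0.7306773", "0.70073175"],  # aligned with negatives
}

triplets = list(row_to_triplets(toy_row))
print(len(triplets))  # 2 here; 30 per row in the actual data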
Checks that the PNG directories exist; if not, it creates them
def check_png_directories(self):
    check_dir_of = Locations.check_dir_of
    check_dir_of(self.HISTO_PNG)
    check_dir_of(self.LABELS_PNG)
    check_dir_of(self.SOURCE_PNG)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def make_empty_directories_linux() -> None:\n mkdir(PICTURES_DIR / 'screenshots' / 'grim')\n mkdir(PICTURES_DIR / 'screenshots' / 'swappy')", "def createImageFolder():\n try:\n os.makedirs(imageFolder)\n except FileExistsError:\n # Exists, delete contents instead\n clearImageFolder()", "def check_folder(directory):\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n print(\"making pictures folder\")\r\n print()\r\n else:\r\n print(\"pictures folder already exists\")\r\n print()", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)", "def create_required_dir():\n if not os.path.exists('utils_dfn/temp'):\n os.mkdir('utils_dfn/temp')\n if not os.path.exists('utils_dfn/img'):\n os.mkdir('utils_dfn/img')\n if not os.path.exists('utils_dfn/mask'):\n os.mkdir('utils_dfn/mask')\n if not os.path.exists('utils_dfn/output'):\n os.mkdir('utils_dfn/utils_dfn/output')\n # if not os.path.exists('compare'):\n # os.mkdir('compare')", "def createDirs(self):\n logging.info(\"Creating Directories\")\n\n if not self.img_exist:\n self.reCreateDir(self.savePathJoin(\"Images\"))\n if not self.of_exist:\n self.reCreateDir(self.savePathJoin(\"Of\"))\n if not self.back_of_exist:\n self.reCreateDir(self.savePathJoin(\"Back_Of\"))\n if not self.depth_exist:\n self.reCreateDir(self.savePathJoin(\"Depth\"))\n if not self.object_detection_dir_exist and (\n self.ui.c_object_detection.isChecked() or self.ui.c_crash_plot.isChecked()\n ):\n self.reCreateDir(self.savePathJoin(\"ObjectDetection\"))\n if self.super_pixel_method != \"\" and not os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n ):\n os.makedirs(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n\n self.reCreateDir(RESULTS)\n self.reCreateDir(NP_DIR)\n self.reCreateDir(MASK_DIR)\n\n if self.ui.c_crash_plot.isChecked():\n self.reCreateDir(PLOT_CRASH_DIR)\n if self.ui.c_draw.isChecked():\n self.reCreateDir(DRAW_DIR)\n if self.ui.c_velocity.isChecked():\n self.reCreateDir(VL_DIR)\n if self.ui.c_speed_plot.isChecked():\n self.reCreateDir(PLOT_SPEED_DIR)\n if self.super_pixel_method != \"\":\n self.reCreateDir(SUPER_PIXEL_DIR)\n if self.user[\"GT\"] != \"\" and self.ui.c_error_plot.isChecked():\n self.reCreateDir(PLOT_ERROR_DIR)", "def check_image_dir(image_dir):\n if not os.path.isdir(image_dir):\n if verbose:\n print(\"INFO : Creating Image Storage folder %s\" % (image_dir))\n try:\n os.makedirs(image_dir)\n except OSError as err:\n print(\"ERROR : Could Not Create Folder %s %s\" % (image_dir, err))\n exit(1)", "def create_directories(dir_names: list, base_path: str):\n\tfor dir_name in dir_names:\n\t\timage_dir = join(base_path, str(dir_name) + 'x')\n\t\tif not isdir(image_dir):\n\t\t\tos.mkdir(image_dir)", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def 
images_exist(self):\n pass", "def make_directories(self):\n os.makedirs(self.data_dir, exist_ok=True)\n os.makedirs(self.patches_dir, exist_ok=True)\n os.makedirs(self.raw_image_dir, exist_ok=True)\n os.makedirs(self.pro_image_dir, exist_ok=True)\n os.makedirs(self.results_dir, exist_ok=True)", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")", "def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname", "def make_path(self):\n folders = [\n f\"{self.save_path}{self.name}/json/\",\n f\"{self.save_path}{self.name}/images/\",\n ]\n if hasattr(self, \"masks\"):\n folders.append(f\"{self.save_path}{self.name}/masks/\")\n for folder in folders:\n if not os.path.exists(folder):\n os.makedirs(folder)", "def setup_image_folder(path_to_images):\n\n print(\"setup images folder...\")\n\n if os.path.isdir(path_to_images):\n print(\"folder already exists: remove...\")\n shutil.rmtree(path_to_images)\n\n os.mkdir(path_to_images)\n print(\"folder created\")", "def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n else:\n pass", "def get_and_create_dirs():\n\n print('Checks for paths for original and segmented images.\\nOriginal and segmented images folders must be organized in the exact same structure (images in the same folders and sub-folders).\\n')\n \n orig_path = None\n\n while orig_path is None:\n orig_path = input('Input root path for original images (default github folder):\\n') or r'.\\chest_xray'\n if not os.path.exists(orig_path):\n orig_path = None\n print('Path doesn\\'t exist, please input a valid directory path.\\n')\n\n seg_path = input('\\nInput root path for segmented images if exists or needs to be created (default github folder):\\n') or r'.\\segmentation'\n if (seg_path is not None) and (not os.path.exists(seg_path)):\n create_dir = 'Z'\n while create_dir not in ['Y', 'N']:\n create_dir = input('Path doesn\\'t exist, would you like to create folder structure for ' + seg_path + ' (Y or N)?\\n')\n if create_dir == 'Y':\n for dirname, _, filenames in os.walk(orig_path):\n os.makedirs(dirname.replace(orig_path, seg_path)) \n if os.path.exists(seg_path):\n print('Directory created.')\n else:\n print('Unknown error while attempting to create directory.')\n else:\n print('Directory not created')\n \n orig_file_ext = 
input('\\nWhat is the file extension for original images (default jpeg) ?\\n').replace('.', '') or 'jpeg'\n \n seg_model = input('\\nWhat is the path to the segmentation model checkpoint (default github folder) ?\\n') or r'.\\Models\\unet_lung_seg.hdf5'\n \n seg_file_ext = input('\\nWhat is the file extension for segmented images (default png) ?\\n').replace('.', '') or 'png'\n \n return orig_path, seg_path, orig_file_ext, seg_model, seg_file_ext", "def create_directories(train_path, test_path):\n train_path.joinpath(\"images\").mkdir(parents=True)\n test_path.joinpath(\"images\").mkdir(parents=True)", "def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)", "def check_axe_dirs():\n safe_mkdir( AXE_IMAGE_PATH )\n safe_mkdir( AXE_OUTPUT_PATH )\n safe_mkdir( AXE_CONFIG_PATH )\n safe_mkdir( AXE_DRIZZLE_PATH )", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")", "def reset_dirs():\n\n image_dir = Config.IMAGE_DIRECTORY\n fig_dir = Config.FIGURE_DIRECTORY\n\n # delete directories\n if os.path.isdir(image_dir):\n shutil.rmtree(image_dir) \n if os.path.isdir(fig_dir):\n shutil.rmtree(fig_dir) \n\n # create directories\n os.mkdir(image_dir)\n orig_dir = os.path.join(image_dir, 'original')\n processed_dir = os.path.join(image_dir, 'processed')\n os.mkdir(orig_dir)\n os.mkdir(processed_dir)\n os.mkdir(fig_dir)\n\n print(f'[INFO] Created image and figure directories.')", "def check_axesim_dirs():\n safe_mkdir( AXE_IMAGE_PATH )\n safe_mkdir( AXE_OUTPUT_PATH )\n safe_mkdir( AXE_CONFIG_PATH )\n safe_mkdir( AXE_SIMDATA_PATH )\n safe_mkdir( AXE_OUTSIM_PATH )", "def test_save_materials_not_existed_dir(temp_dir):\n image1 = [[[0, 0, 0], [0, 0, 0]], [[255, 255, 255], [255, 255, 255]]]\n image2 = [[[0, 0, 0], [255, 255, 255]], [[255, 255, 255], [0, 0, 0]]]\n image3 = [[[255, 255, 255], [255, 255, 255]], [[0, 0, 0], [0, 0, 0]]]\n\n data = [\n (\"image1.png\", Image.fromarray(np.array(image1, dtype=np.uint8))),\n (\"image2.png\", Image.fromarray(np.array(image2, dtype=np.uint8))),\n (\"image3.png\", Image.fromarray(np.array(image3, dtype=np.uint8))),\n ]\n dist = os.path.join(temp_dir, 'not_existed')\n save_materials(dist, data, step=1)\n\n assert os.path.exists(os.path.join(dist, \"images\", \"1\", \"image1.png\"))\n assert os.path.exists(os.path.join(dist, \"images\", \"1\", \"image2.png\"))\n assert os.path.exists(os.path.join(dist, \"images\", \"1\", \"image3.png\"))", "def create_folders():\n os.makedirs(GRID_DIR, exist_ok=True)", "def _create_folder_if_not_exist(filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)", "def _create_paths(paths):\n for path in paths:\n _mkdir_if_not_exist(path)" ]
[ "0.7306773", "0.70073175", "0.69785833", "0.6906268", "0.68699163", "0.6868954", "0.6845684", "0.6698808", "0.6694639", "0.66394055", "0.66372293", "0.6557312", "0.65152556", "0.6488042", "0.64863986", "0.64772093", "0.6472387", "0.6460976", "0.6445166", "0.642431", "0.64194345", "0.6416145", "0.6398929", "0.6381689", "0.63567317", "0.6341822", "0.63310087", "0.6325161", "0.6299288", "0.6283824" ]
0.7632398
0
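Across the records shown here, document_rank appears to count how many negatives score above document_score: in this first record 0.7632398 exceeds every negative score, giving rank 0, while in the next record one negative (0.6648611) outscores the document (0.64731854), giving rank 1. The sketch below reproduces that apparent relation; it is an inference from the values in this dump, not a documented definition of the field.

# Sketch of the apparent relation between document_score, negative_scores and
# document_rank in these records: rank = number of negatives scoring strictly
# higher than the positive document. Scores are stored as strings, hence float().

def apparent_rank(document_score, negative_scores):
    d = float(document_score)
    return sum(1 for s in negative_scores if float(s) > d)

# Values from the first two records (only the top negative scores matter here):
print(apparent_rank("0.7632398", ["0.7306773", "0.70073175"]))   # 0
print(apparent_rank("0.64731854", ["0.6648611", "0.6380659"]))   # 1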
Returns the location of the slice mask for the requested index. It does not validate the index. A subject must be set prior to calling this method
def get_mask_png_location(self, index):
    if self.subject == None:
        raise AssertionError('A subject must be set before calling this method')
    f = Locations.partial_formatter
    return f(self.SLICE_MASK_PNG, index=index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask_index(self) -> int:\n return self._mask_index", "def mask(self):\n return self.mask_index", "def get_mask(self, index):\n if not self.masked:\n return None\n \n x, y = self.location_of(index)\n # Do not take patches from out of bounds spaces\n #if x > img_w - self.size or y > img_h - self.size:\n # raise IndexError('Patch boundary out of bounds')\n \n patch = torch.from_numpy(self.mask[x : x + self.size, y : y + self.size])\n \n if self.transform:\n patch = self.transform(patch)\n return patch", "def getMask(self,filt):\n indx = [self.mask[i] for i in xrange(len(self._header)) if filt == self._header[i]]\n return indx", "def masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask,order='C'))[0]", "def non_masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask-1,order='C'))[0]", "def get_slice(self):\n return self.locs[tuple(self.indices), :]", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def map(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = self.start - (-index - 1)\n\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn self.start - index\n\t\telif type(index) is rspan:\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\tstop = self.map(index.end) + 1\n\t\t\treturn slice(self.map(index.start), None if stop == len(self) else stop)\n\t\telif type(index) is slice:\n\t\t\tstop = self.map(index.stop if index.stop is not None else self.end) + 1\n\t\t\treturn slice(self.map(index.start if index.start is not None else self.start), None if stop == len(self) else stop)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.map(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")\n\n\t\traise ValueError(f\"{index!r}: bad index\")", "def map(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = self.end - (-index - 1)\n\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn index - self.start\n\t\telif type(index) is span:\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\tstop = self.map(index.end) + 1\n\t\t\treturn slice(self.map(index.start), None if stop == len(self) else stop)\n\t\telif type(index) is slice:\n\t\t\tstop = self.map(index.stop if index.stop is not None else self.end) + 1\n\t\t\treturn slice(self.map(index.start if index.start is not None else self.start), None if stop == len(self) else stop)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.map(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")", "def get_sample_mask(self):", "def Mask(self) -> int:", "def get_boundary_position_of_index():\n function = LegacyFunctionSpecification() \n function.must_handle_array = True\n for x in ['i','j','k']:\n function.addParameter(x, dtype='i', direction=function.IN)\n function.addParameter('index_of_boundary', dtype='i', direction=function.IN, default = 1)\n for x in ['x','y','z']:\n function.addParameter(x, dtype='d', direction=function.OUT)\n function.addParameter('number_of_points', 'i', function.LENGTH) \n function.result_type = 'i'\n return function", "def mask(self, mask):\n return MaskedDistribution(self, mask)", "def _get_current_mask(self):\n return [self.input_fields[x].text() for x in range(0,int(self.mask_scale_x.text()) * 
int(self.mask_scale_y.text()))]", "def mask(self) -> list[int]:\n return self._mask", "def get_mask_offset(mask):\n # use ctypes to truncate the result to a uint32\n cmask = ctypes.c_uint32(mask).value\n return _bruijn32lookup[ctypes.c_uint32((mask & -mask) * 0x077cb531).value >> 27]", "def get_input_mask_at(self, node_index):\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)", "def test_get_mask(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n mask = spine_data_loader.get_mask(str(idx))\n assert mask.shape == (256, 256, 1)\n assert mask.dtype == 'int64'", "def cal_mask(mask_999, index):\n mask = np.all(mask_999[:, index], axis = 1) & (np.sum(mask_999, axis = 1) == len(index))\n return mask, np.sum(mask)", "def mask(self):\n return type(self)(self.data.mask, self.bset)", "def mask(self) -> str:\n return self.tokenizer.get_command('MASK').Id", "def _get_mask(self, x):\n x_mask = Variable(torch.zeros(x.size(0), self.max_seq_len).byte())\n return x_mask.cuda() if self.use_cuda else x_mask", "def get_mask ( self, iloc ):\n mask = self._mask[iloc]\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n sat = g.ReadAsArray()\n m3 = sat == 0\n\n the_mask = mask.replace(\"SAT\", \"DIV\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n div = g.ReadAsArray()\n m1 = div == 0\n\n the_mask = mask.replace(\"SAT\", \"NUA\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n nua = g.ReadAsArray()\n m2 = np.logical_not ( np.bitwise_and ( nua, 1 ).astype ( np.bool ) )\n return m1 * m2 * m3", "def get_mask ( self, iloc ):\n mask = self._mask[iloc]\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n sat = g.ReadAsArray()\n m3 = sat == 0\n\n the_mask = mask.replace(\"SAT\", \"DIV\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n div = g.ReadAsArray()\n m1 = div == 0\n\n the_mask = mask.replace(\"SAT\", \"NUA\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n nua = g.ReadAsArray()\n m2 = np.logical_not ( np.bitwise_and ( nua, 1 ).astype ( np.bool ) )\n return m1 * m2 * m3", "def rmap(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = len(self) - (-index - 1)\n\n\t\t\tresult = self.start - index\n\t\t\tif self & result != result:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn result\n\t\telif type(index) is slice:\n\t\t\treturn slice(\n\t\t\t\tself.rmap(index.start if index.start is not None else self.start),\n\t\t\t\tself.rmap((index.stop if index.stop is not None else len(self)) - 1))\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.rmap(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")\n\n\t\traise ValueError(f\"{index!r}: bad index\")", "def mask_id(self) -> 
int:\n return self.tokenizer.get_command('MASK').Id" ]
[ "0.73455626", "0.7261472", "0.64302695", "0.639594", "0.62014866", "0.60594404", "0.6010678", "0.59553385", "0.59553385", "0.59553385", "0.59553385", "0.57957006", "0.57448375", "0.5725256", "0.57158476", "0.5667323", "0.56491894", "0.5641712", "0.5563752", "0.5561733", "0.55430377", "0.5522845", "0.54886687", "0.5484295", "0.54686904", "0.5435914", "0.54295385", "0.54295385", "0.5422329", "0.5400783" ]
0.73636913
0
Returns the location of the requested label file. A subject must be set prior to calling this method
def get_label_png_location(self, index):
    if self.subject == None:
        raise AssertionError('A subject must be set before calling this method')
    f = Locations.partial_formatter
    return f(self.LABELS_PNG, index=index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _GetPathFromLabel(self, label):\n\n return os.path.join(self.GetRoot(),\n self._GetRelativeLabelPath(label))", "def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n path = \"\"\n components = self.GetLabelComponents(label)\n if not components:\n return path\n \n for c in components[:-1]:\n path = os.path.join(path, c + self.suite_extension)\n path = os.path.join(path, components[-1])\n return path", "def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n return os.path.join(*self.GetLabelComponents(label))", "def get_label(filename:str) -> str:\n label = filename.split(\"/\")[-2]\n return label", "def get_filename(label):\n return op.splitext(op.splitext(op.basename(label))[0])[0]", "def label_names_file():\n return tfds.core.tfds_path(_LABELS_FNAME)", "def _get_file_path(self):\n self.select_pdf()\n self.file_path_label.configure(\n text=self._shorten_file_name())\n self.file_path_label.grid(row=0, column=1)", "def _GetLabelFromBasename(self, basename):\n \n return basename", "def get_label(self):\n return self.job[self.label_key]", "def getDefaultFileLocation(self):\n\n label_env = os.getenv('DISPASS_LABELFILE')\n std_env = os.getenv('XDG_DATA_HOME') or os.getenv('APPDATA')\n home_file = '~/.dispass/labels'\n\n if label_env:\n return label_env\n if not exists(home_file) and std_env:\n return std_env + '/dispass/labels'\n else:\n return home_file", "def get_location(self):\n return os.path.dirname(self.filename)", "def get_file(self, layout, subject, suffix, extension, session=None):\n if session is None:\n files = layout.get(subject=subject, suffix=suffix, extension=extension)\n else:\n files = layout.get(subject=subject, suffix=suffix, extension=extension, session=session)\n\n if len(files) > 0:\n out_file = os.path.join(files[0].dirname, files[0].filename)\n\n if self.global_conf.dmri_bids_acq != \"\":\n for file in files:\n if self.global_conf.dmri_bids_acq in file.filename:\n out_file = os.path.join(file.dirname, file.filename)\n break\n\n # TODO: Better parsing of multiple runs\n else:\n out_file = None\n\n return out_file", "def get_labels_filepath(self, dataset_type: DatasetType) -> str:\n return os.path.join(\n self.save_dir, self.name, dataset_type.name.lower() + \"_labels.csv\"\n )", "def filename(self) -> str:\n return self.__location.filename", "def label_by_parent_folder(self):\n return self.label_by_function(lambda fname: fname.split('/')[-2])", "def get_path_and_label(root, file):\n # path of image (path is image)\n path = os.path.join(root, file)\n # Grab name of folder / Grab image folder name and replace spaces to - and convert all into lower case\n label = os.path.basename(root).replace(\" \", \"-\").lower()\n return path, label", "def _get_label(self):\n return self.label", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label", "def _get_subject_input_path(input_root, subject_id):\n subject_dir = get_subject_dir(input_root, subject_id)\n return subject_dir / f\"{subject_id}_task-tapping_nirs.snirf\"", "def _find_colabeled_file(self) -> Union[pathlib.Path, None]:\n name = self.results_file.name[:-11] + \"colabeled_idx.npy\"\n try:\n colabel_file = next(self.results_file.parent.glob(name))\n return colabel_file\n except StopIteration:\n return None", "def file_location(self) -> str:\n if os.path.exists(self._file_path):\n return \"The file ({}) is 
located in the path ({})\".format(self._file_name, self._file_path)\n else:\n raise FileNotFoundError", "def _get_file_names_and_labels(self):\n # Get waveform file names\n file_names = list(self.lookup_dict.keys())\n\n # Get labels\n labels = [self.lookup_dict[key] for key in self.lookup_dict.keys()]\n\n # file_paths and labels should have same length\n assert len(file_names) == len(labels)\n\n return file_names, labels", "def tests_ti_file_get_label(self):\n super().indicator_get_label()", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def _get_label_config(self, file_path):\n # Get label config file\n dir = os.path.dirname(file_path)\n file = os.path.basename(file_path)\n split_list = file.split('.')\n nii_index = split_list.index('nii')\n file = ''.join(split_list[:nii_index])\n config_file = os.path.join(file, 'lbl')\n if os.path.isfile(config_file):\n label_config = LabelConfig(config_file, False)\n else:\n label_config = self.label_config\n\n return label_config", "def findLabel(self, label):\n return self.root._findLabel(label)", "def get_path_image(path_data, label, filename):\n\n return path_data.joinpath(f'label_{label}', filename)", "def get_output(path, label_file = None):\n img_id = path.split('/')[-1]\n labels = label_file.loc[img_id].values\n return labels" ]
[ "0.6648611", "0.6380659", "0.62147915", "0.6169521", "0.591856", "0.5886688", "0.58614296", "0.5729815", "0.57252276", "0.5621918", "0.5616924", "0.5591803", "0.5585961", "0.55759835", "0.5569629", "0.55620193", "0.5542063", "0.5513758", "0.54805475", "0.54596347", "0.54587704", "0.5434497", "0.54325634", "0.54263157", "0.54263157", "0.54263157", "0.5347712", "0.5346903", "0.5343058", "0.5342246" ]
0.64731854
1
Returns the location of the requested histology file. A subject must be set prior to calling this method
def get_histo_png_location(self, index):
    if self.subject == None:
        raise AssertionError('A subject must be set before calling this method')
    f = Locations.partial_formatter
    return f(self.HISTO_PNG, index=index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_path(self):\n return self._obs_file()", "def get_location(self):\n return os.path.dirname(self.filename)", "def svn_fs_history_location(*args):\r\n return _fs.svn_fs_history_location(*args)", "def file_loc(self):\n\t\treturn self.__dbfile", "def get_hist(self, dmc, year, trigger, pt, var, frame, plot):\n logging.debug('Trying to get plot from file {0}, Parameters are: '\n 'year={1}, trigger={2}, pt={3}, var={4}, frame={5}, '\n 'plot={6}, dmc={7}'.format(self.filename(), year, trigger,\n pt, var, frame, plot, dmc))\n subdir = self._get_subdir(dmc, year, trigger, pt)\n histname = self._get_histname(plot, var, frame)\n return self._get_by_str('/'.join([subdir, histname]))", "def filename(self) -> str:\n return self.__location.filename", "def path(self):\n return self.file_path()", "def get_absolute_path(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetAbsolutePath', self.handle)", "def get_absolute_path(self):\n if self.datafile and self.datafile.storage.exists(self.datafile.path):\n return self.datafile.path\n else:\n return None", "def file_path(self):\n return self.lib.file_path", "def path(self):\n return self._data_file", "def locate_file(self, filename):\n return locate_file(filename, self.observatory)", "def location(self) -> List[str]:\n loc = [os.path.basename(self.filepath)]\n for h in self.headings:\n loc.append(h.heading.strip())\n\n return loc", "def getIndexFilePath(self):\n return self.index_file_path", "def __get_path(self):\n return self.path", "def get_path(self):\n try:\n return self._file.path\n except AttributeError:\n return os.path.abspath(self._file.name)", "def real_path(self):\n\t\treturn self.args[0]", "def path(self):\n return self.lib.path", "def get_path(self):\n return self.path", "def path(self):\n return self.path", "def get_file_path(self):\n return self._file_path", "def filepath(self):\n return self.filepath_", "def get_path(self):\n\t\treturn call_sdk_function('PrlShare_GetPath', self.handle)", "def getFile(self, stamp):\n name = escapeForPath(str(stamp))\n return os.path.join(self.path, name)", "def logpath(self):\n return self.outpath", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path" ]
[ "0.59872836", "0.57290167", "0.5555855", "0.55376714", "0.5519211", "0.55075973", "0.5495559", "0.54652685", "0.5446337", "0.54237443", "0.54112005", "0.53727615", "0.5362345", "0.53595704", "0.5332021", "0.531647", "0.5274756", "0.5249345", "0.52212", "0.52183527", "0.5216787", "0.52", "0.5198469", "0.5185347", "0.5172579", "0.5168921", "0.5168921", "0.5168921", "0.5168921", "0.5168921" ]
0.6232143
0
Close and reopen all file handlers.
def reopen_files(self):
    for log in (self.error_log, self.access_log):
        for h in log.handlers:
            if isinstance(h, logging.FileHandler):
                h.acquire()
                h.stream.close()
                h.stream = open(h.baseFilename, h.mode)
                h.release()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_all_file_handles(self):\n\n if isinstance(getattr(self, \"_file_handles\", None), dict):\n for fh in self._file_handles.values():\n fh.close()\n self._file_handles.clear()", "def close(self):\n if not self.__closed:\n self.counters = { \"error\": 0, \"warning\": 0, \"success\": 0, \"failure\": 0 }\n\n try:\n self.__flush_count = 0\n for handler in self.__filehandlers:\n handler.flush()\n self.__logger.removeHandler(handler)\n handler.close()\n except:\n # do nothing\n pass\n self.__closed = True", "def close_all_files(self):\r\n while self.close_file():\r\n pass", "def __del__(self):\n for handle in self._filehandles:\n handle.close()", "def release_handlers(self):\n for handler in self.handlers:\n handler.close()", "def close(self) -> None:\n if self.file_handler:\n self.file_handler.close()", "def close( self ):\n \n for file in self._files:\n ir.file_hub.close( file )", "def release_logger_files():\n for hl in logging.getLogger().handlers:\n if isinstance(hl, logging.FileHandler):\n hl.close()\n logging.getLogger().removeHandler(hl)", "def release_logger_files():\n for hl in logging.getLogger().handlers:\n if isinstance(hl, logging.FileHandler):\n hl.close()\n logging.getLogger().removeHandler(hl)", "def _close_file_logger(self):\n if self._file_log_handler is not None:\n self._file_log_handler.flush()\n self._file_log_handler.close()\n self.logger.removeHandler(self._file_log_handler)\n self._file_log_handler = None\n self.logger.propagate = True", "def _close(self):\n for fd in self._fds:\n try:\n os.close(fd)\n except:\n pass", "def __del__(self):\n for file in list(self.mFiles.values()):\n file.close()", "def __del__(self):\n for f in self._files:\n f.close()", "def __del__(self):\n self.close_files()", "def unload(self):\n for f in self.logs.values():\n f.close()", "def _CloseOutputFiles(self):\n self.gfile.close()\n self.efile.close()", "def _close(self):\n self.fh.close()", "def __cleanup(self):\n wrappers = copy.copy(self.__wrappers)\n\n num = len(wrappers)\n for fd in wrappers:\n wrappers[fd].close()\n\n self.__wrappers = {}\n self.__disconnected_wrappers = []\n self.__logger.info(\"Closed %d IOWrappers\" % num)\n os.close(self.__wakeup_read)\n os.close(self.__wakeup_write)", "def close(self):\r\n self._report_file.close()\r\n # Make sure everything's closed.\r\n for files in self._output_files.values():\r\n for f in files.values():\r\n f.close()", "def __del__(self):\n for component_name, file in self._file_list.items():\n file.close()", "def close_files(self):\n self.wb_alm.close()\n self.wb_defect.close()\n self.wb_enhancement.close()\n self.wb_incident.close()\n self.wb_destination.close()", "def _cleanup(self):\n if self.current_session is not None:\n self.current_session.close()\n self.current_session = None\n\n for handler in list(self.logger.root_logger.handlers):\n self.logger.root_logger.removeHandler(handler)\n handler.flush()\n handler.close()", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None", "def _safe_close(self, fds):\n for fd in fds:\n try:\n os.close(fd)\n except OSError as err:\n if err.errno != errno.EBADF:\n raise\n # TODO(kota_): fd might be closed already, so if already\n # closed, OSError will be raised. 
we need more refactor to\n # keep clean the file discriptors.\n pass", "def _close_stdio(self, log_path: PurePath):\n for attr, writable in ((\"stdin\", False), (\"stdout\", True), (\"stderr\", True)):\n # Close the old.\n fd = getattr(sys, attr)\n fileno = fd.fileno()\n fd.flush()\n fd.close()\n\n # Open the new.\n temp_fd = safe_open(log_path, \"a\") if writable else open(os.devnull)\n os.dup2(temp_fd.fileno(), fileno)\n setattr(sys, attr, os.fdopen(fileno, mode=(\"w\" if writable else \"r\")))\n sys.__stdin__, sys.__stdout__, sys.__stderr__ = sys.stdin, sys.stdout, sys.stderr # type: ignore[assignment,misc]", "def close_file_readers(file_reader_last_read_list):\n for file_reader in file_reader_last_read_list:\n file_reader[\"file_reader\"].close()", "def close_file(self):\n self.root_group.close()", "def close_file_handle(self):\n if self.file_handle and self.output_file:\n self.file_handle.close()", "def close(self):\n for logger in self._loggers:\n logger.close()", "def close(self):\n for key, logger in self._loggers.items():\n logger.close()" ]
[ "0.7845344", "0.75302327", "0.7509484", "0.72066605", "0.7174362", "0.7027809", "0.70179516", "0.6978493", "0.6978493", "0.68127835", "0.6807487", "0.6763765", "0.67139983", "0.66308576", "0.6599396", "0.6508524", "0.6469078", "0.6453326", "0.63491166", "0.62819314", "0.6280671", "0.6280342", "0.6267076", "0.62205136", "0.6208749", "0.6178877", "0.6155317", "0.61455107", "0.6116813", "0.6106979" ]
0.78520674
0
Convert to a new object of a networkx.MultiGraph-like class cls
def convertTo( self, cls, data=False, keys=False ):
    g = cls()
    g.add_nodes_from( self.nodes( data=data ) )
    g.add_edges_from( self.edges( data=( data or keys ), keys=keys ) )
    return g
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_graph(edges):\n \n G = nx.MultiGraph()\n G.add_edges_from(edges)\n return G", "def collapse_multigraph_to_nx(graph: Union[gr.MultiDiGraph, gr.OrderedMultiDiGraph]) -> nx.DiGraph:\n\n # Create the digraph nodes.\n digraph_nodes: List[Tuple[int, Dict[str, nd.Node]]] = ([None] * graph.number_of_nodes())\n node_id = {}\n for i, node in enumerate(graph.nodes()):\n digraph_nodes[i] = (i, {'node': node})\n node_id[node] = i\n\n # Create the digraph edges.\n digraph_edges = {}\n for edge in graph.edges():\n src = node_id[edge.src]\n dest = node_id[edge.dst]\n\n if (src, dest) in digraph_edges:\n edge_num = len(digraph_edges[src, dest])\n digraph_edges[src, dest].update({edge_num: edge.data})\n else:\n digraph_edges[src, dest] = {0: edge.data}\n\n # Create the digraph\n result = nx.DiGraph()\n result.add_nodes_from(digraph_nodes)\n result.add_edges_from(digraph_edges)\n\n return result", "def _construct_graph(self):\n raise NotImplementedError", "def CreateFromNetworkX(\n cls, g: nx.MultiDiGraph, ir_id: int, split: Optional[int] = None,\n ) -> \"GraphTuple\":\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split)\n mapped.data_flow_steps = g.graph.get(\"data_flow_steps\")\n mapped.data_flow_root_node = g.graph.get(\"data_flow_root_node\")\n mapped.data_flow_positive_node_count = g.graph.get(\n \"data_flow_positive_node_count\"\n )\n return mapped", "def direct_network(self):\n #print list(self.get_subgraphs())\n graphs = [self._depth_first_directed(g) for g in self.get_subgraphs()]\n self._network = reduce(lambda a, b: nx.union(a, b), graphs)", "def to_networkx(self):\n g = nx.Graph()\n for v in self.vs.values():\n g.add_node(v)\n for v in self.fs:\n g.add_node(v)\n for u in v.neighbors:\n g.add_edge(v, u)\n return g", "def __toNetworkX(self):\n G = nx.Graph()\n G.add_nodes_from(range(self.n))\n for u in range(self.n):\n for v in range(self.n):\n if self.adjacent(u, v):\n G.add_edge(u, v)\n\n return G", "def __deepcopy__(self, memodict={}):\n nodes = [deepcopy(n) for n in self.nodes]\n return Network(nodes)", "def parseNodeUsingClass(cls, multElement, xPath, linkData, **kwargs):\n\n xPath.append( multElement.tag )\n\n multiplicityComponent = cls()\n\n formClasses = {}\n for formClass in [ Unspecified, Constant1d, XYs1d, Regions1d, Reference, Polynomial1d, PartialProduction, Gridded1d, Branching1d ] :\n formClasses[formClass.moniker] = formClasses\n for form in multElement :\n formClass = formClasses.get( form.tag )\n if( formClass is None ) : raise Exception( \"encountered unknown multiplicity form: %s\" % form.tag )\n newForm = formClass.parseNodeUsingClass(form, xPath, linkData, **kwargs)\n multiplicityComponent.add( newForm )\n\n xPath.pop( )\n\n return( multiplicityComponent )", "def parse_graph(self):\n\t\tnx_graph = nx.Graph()\n\t\tfor node in self.vertices:\n\t\t\tnx_graph.add_node(node)\n\n\t\tfor edge in self.edges:\n\t\t\tnode1, node2, weight = edge\n\t\t\tnx_graph.add_edge(node1, node2, weight=weight)\n\n\t\treturn nx_graph", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n 
self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def to_graph(l):\n G = nx.Graph()\n for part in l:\n # each sublist is a bunch of nodes\n G.add_nodes_from(part)\n # it also imlies a number of edges:\n G.add_edges_from(to_edges(part))\n return G", "def __init__(self, network: Network):\n self.graph = network.graph", "def instance():\n from weighted_graph import Graph\n inst = Graph()\n for edge in EDGES:\n inst.add_edge(*edge)\n return inst", "def create_graph_with_nodes(src_nodes, get_id: callable, get_attrs: callable):\n graph = nx.MultiDiGraph()\n for node in src_nodes:\n graph.add_node(get_id(node), **get_attrs(node))\n return graph", "def graph_with_graph(cls, graph):\n new = cls()\n new.nx_graph = graph.nx_graph.copy()\n new.max_date = graph.max_date\n new.min_date = graph.min_date\n return new", "def copy(self, new=True):\n return UndirectedGraph(self._nodes, self._edges)", "def create( basic_graph, mcs_ids, rule, add_attr = True ) :\n g = copy.deepcopy( basic_graph )\n for id in mcs_ids :\n id0, id1 = mcs.get_parent_ids( id )\n simi = rule.similarity( id0, id1, mcs_id = id )\n if (simi > 0) :\n if (add_attr) :\n try :\n partial_ring = int( KBASE.ask( id, \"partial_ring\" ) )\n except LookupError :\n partial_ring = 0\n try :\n slack_simi = KBASE.ask( id, \"slack_similarity\" )\n except LookupError :\n slack_simi = 0.0\n g.add_edge( id0, id1, similarity = simi, slack_similarity = slack_simi,\n partial_ring = partial_ring, mcs_id = id )\n else :\n g.add_edge( id0, id1, similarity = simi )\n return g", "def as_graph(self, graph=None):\n # at this level it works but what if we have nested structures?\n # What is a graph if not a set of links? Why do not we put all into a graph?\n if not graph:\n graph = nx.Graph()\n\n for link in self.sequence:\n logging.info(link)\n (l, r) = link.value\n (ln, rn) = link.name\n logging.info (\"Node: %s %s \" % (l.name, str(l.shannon)))\n graph.add_node(l.name, shannon=l.shannon, IC=l.IC)\n logging.info (\"Node: %s %s \" % (r.name, str(r.shannon)))\n graph.add_node(r.name, shannon=r.shannon, IC=r.IC)\n logging.info (\"Edge: %s %s %s \" % (l.name, r.name, str(link.PMI)))\n graph.add_edge(l.name, r.name, pmi=link.PMI)\n\n return graph", "def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new", "def gen_graph(self):", "def fresh_copy(self):\n return OrderedMultiGraph()", "def clone(self) -> \"Graph\":\n return Graph(seed=self.seed,\n layout=self.layout,\n community_n=self.community_n,\n community_size_mean=self.community_size_mean,\n community_size_std=self.community_size_std,\n community_p_in=self.community_p_in,\n community_p_out=self.community_p_out,\n considered_immune_threshold=self.considered_immune_threshold)", "def get_graph(msm, with_comm_classes=False, edge_threshold=0.):\n\n g = AGraph(strict=False, directed=True)\n\n g.graph_attr.update(size=\"7.75, 10.25\")\n g.graph_attr.update(dpi=\"300\")\n\n g.add_nodes_from(range(msm.num_nodes))\n\n if with_comm_classes:\n comm_classes = msm.communication_classes\n\n for (i, comm) in enumerate(comm_classes):\n g.add_subgraph(nbunch=comm, name='cluster%d' % i,\n style='rounded, dotted',\n color='lightgrey',\n label='<<B>Communication class 
%d</B>>' % (i + 1))\n\n for from_node in range(msm.num_nodes):\n for to_node in get_adjacent_nodes(msm, from_node, discard_self=False):\n if msm.T[from_node, to_node] > edge_threshold:\n label = '%.2f' % msm.T[from_node, to_node]\n g.add_edge(from_node, to_node, label=label)\n\n return g", "def __init__(self, class_graph: class_dependency.JavaClassDependencyGraph):\n super().__init__()\n\n # Create list of all targets using class nodes\n # so we don't miss targets with no dependencies (edges).\n for class_node in class_graph.nodes:\n if len(class_node.build_targets) > _MAX_CONCURRENT_BUILD_TARGETS:\n continue\n for build_target in class_node.build_targets:\n self.add_node_if_new(build_target)\n\n for begin_class, end_class in class_graph.edges:\n if len(begin_class.build_targets) > _MAX_CONCURRENT_BUILD_TARGETS:\n continue\n if len(end_class.build_targets) > _MAX_CONCURRENT_BUILD_TARGETS:\n continue\n for begin_target in begin_class.build_targets:\n for end_target in end_class.build_targets:\n # Avoid intra-target deps.\n if begin_target == end_target:\n continue\n\n self.add_edge_if_new(begin_target, end_target)\n\n begin_target_node = self.get_node_by_key(begin_target)\n end_target_node = self.get_node_by_key(end_target)\n assert begin_target_node is not None\n assert end_target_node is not None\n begin_target_node.add_class(begin_class)\n end_target_node.add_class(end_class)\n begin_target_node.add_class_dependency_edge(\n end_target_node, begin_class, end_class)", "def copy(self):\n cls = self.__class__\n new_graph = cls.__new__(cls)\n new_graph._nodes = self._nodes[:]\n new_graph._node_wip = self._node_wip[:]\n new_graph._edges = self._edges[:]\n if self._sorted_nodes:\n new_graph._sorted_nodes = self._sorted_nodes[:]\n else:\n new_graph._sorted_nodes = None\n new_graph.predecessors = {}\n for key, val in self.predecessors.items():\n new_graph.predecessors[key] = self.predecessors[key][:]\n new_graph.successors = {}\n for key, val in self.successors.items():\n new_graph.successors[key] = self.successors[key][:]\n return new_graph", "def __init__(self):\n self.tree = nx.Graph() \n self.orig_dist_matrix = pd.DataFrame()\n self.work_dist_matrix = pd.DataFrame() \n self.cluster_map = {} \n self.class_map = {}", "def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n for nd1, nd2, attrs in G.edges(data=True):\n # G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n G_new.add_edge(str(nd1), str(nd2))\n\n return G_new", "def __init__(self, graph=None):\n\n self.graph = graph if graph else nx.Graph()", "def build_graph(self):\n raise NotImplementedError" ]
[ "0.6156872", "0.6093618", "0.60673475", "0.59905785", "0.5913077", "0.5776343", "0.57379025", "0.57106227", "0.56839067", "0.5642758", "0.5619346", "0.558172", "0.5574337", "0.55602324", "0.5526403", "0.5499183", "0.54969406", "0.5460194", "0.54598016", "0.54490125", "0.5446127", "0.54327327", "0.5430268", "0.5424455", "0.5409319", "0.5401128", "0.53906626", "0.53594744", "0.5352227", "0.5339268" ]
0.63634163
0
Converts a video into a "JSON" dictionary of its keypoints
def video_to_dict(inputVideo, start, end):
    params = set_params(tracking = 0, hand_opti = True)
    opWrapper = init_openpose(params)
    videoJson = []
    vs = cv2.VideoCapture(inputVideo)
    vs.set(cv2.CAP_PROP_FPS, 25)
    id_frame = 1
    while(1):
        (ret, frame) = vs.read()
        if (not ret) or (end != -1 and id_frame > end):
            break
        if id_frame >= start :
            datum = frame_to_keypoints(frame, opWrapper)
            videoJson.append({"frame" : id_frame - start, "keypoints" : keypoints_to_json(datum)})
        id_frame += 1
    return videoJson
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wrangle_video_record(json_obj: dict):\n entry_dict = {}\n for key, value in get_final_key_paths(\n json_obj, '', True, black_list=['localized', 'thumbnails'],\n final_keys_only=True):\n if key in video_keys_and_columns: # converting camelCase to underscore\n new_key = []\n for letter in key:\n if letter.isupper():\n new_key.append('_' + letter.lower())\n else:\n new_key.append(letter)\n key = ''.join(new_key)\n if key == 'relevant_topic_ids':\n value = list(set(value)) # due to duplicate parent topic ids\n elif key == 'duration':\n value = convert_duration(value)\n elif key == 'published_at':\n value = value.replace('T', ' ')\n elif key == 'actual_start_time':\n key = 'stream'\n value = 'true'\n elif key in ['view_count', 'dislike_count', 'like_count',\n 'comment_count']:\n value = int(value)\n entry_dict[key] = value\n\n return entry_dict", "def get_video():\n video = {}\n for k, v in DB.VIDEOS.iteritems():\n video[k] = v.__dict__\n return video", "def standalize_yt2t(input_file):\n logger.info('Reading file: %s', input_file)\n lines = [line.rstrip('\\n') for line in open(input_file)]\n lines = [line.split('\\t') for line in lines]\n\n logger.info('Building caption dictionary for each video key')\n video_ids = []\n capdict = {}\n for line in lines:\n video_id = line[0]\n if video_id in capdict:\n capdict[video_id].append(line[1])\n else:\n capdict[video_id] = [line[1]]\n video_ids.append(video_id)\n\n # create the json blob\n videos = []\n captions = []\n counter = itertools.count()\n for video_id in video_ids:\n\n vid = int(video_id[3:])\n jvid = {}\n jvid['category'] = 'unknown'\n jvid['video_id'] = video_id\n jvid['id'] = vid\n jvid['start_time'] = -1\n jvid['end_time'] = -1\n jvid['url'] = ''\n videos.append(jvid)\n\n for caption in capdict[video_id]:\n jcap = {}\n jcap['id'] = next(counter)\n jcap['video_id'] = vid\n jcap['caption'] = unicode(caption, errors='ignore')\n captions.append(jcap)\n\n out = {}\n out['info'] = {}\n out['videos'] = videos\n out['captions'] = captions\n\n return out", "def compute_video_encoding(video):\n\n video_points = []\n while True:\n ret, frame = video.read()\n if not ret:\n break\n\n # Find landmarks/points in frame.\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rects = face_detector(gray, 1)\n if (len(rects) == 0):\n break # No face found.\n landmarks = face_predictor(gray, rects[0])\n\n # Convert landmarks to a numpy array.\n points = []\n for i in range(0, landmarks.num_parts):\n if i == 60 or i == 64:\n continue\n point = landmarks.part(i)\n points.append([point.x, point.y])\n points = np.array(points)\n\n img, maps, pts = gann_utils.process_image(frame, points)\n video_points.append(pts)\n\n video_points = np.array(video_points).transpose().swapaxes(0, 1)\n return video_points", "def test_parse_youtube_one_video(self):\r\n youtube_str = '0.75:jNCf2gIqpeE'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def test_parse_youtube_one_video(self):\r\n youtube_str = '0.75:jNCf2gIqpeE'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def test_parse_youtube(self):\r\n youtube_str = '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': 'ZwkTiUPN0mg',\r\n '1.25': 'rsq9auxASqI',\r\n 
'1.50': 'kMyNdzVHHgg'})", "def test_parse_youtube(self):\r\n youtube_str = '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': 'ZwkTiUPN0mg',\r\n '1.25': 'rsq9auxASqI',\r\n '1.50': 'kMyNdzVHHgg'})", "def dict_to_json(videoJson, id_gloss, id_instance):\n path = \"/home/nmiguens/JSON/WLASL\"\n name = \"{}_\".format(id_gloss) + \"{}.json\".format(id_instance)\n with open(os.path.join(path, name), 'w') as fout:\n json.dump(videoJson , fout)\n return 0", "def get_video(soup, data, dictionary):\n video_markup = [] \n VIDEOS_TAGS = ['iframe', 'embed', 'object', 'video']\n VIDEO_PROVIDERS = ['youtube', 'vimeo', 'dailymotion', 'kewego']\n #print \",\".join(VIDEOS_TAGS)\n for t in VIDEOS_TAGS:\n if soup.find_all(t):\n for vid in soup.find_all(t):\n # youtube og vimeo kan avsløres ver src atributt til iframe tag\n #print vid\n for prov in VIDEO_PROVIDERS:\n if prov in vid['src']:\n video_markup.append(vid)\n\n #print video_markup \n #print \"antall videoer (ikke nrk): \", len(video_markup)\n\n # nrk-videoer (lastet via js, og må trikses med)\n # ser ut som eksistensen av en data-video-id=\"118648\" kan være en bedre indikator.. \n nrk_videoer = soup.select('figure.video')\n #print \"antall nrk-videoer: \", len(nrk_videoer)\n\n\n dictionary['video_files'] = len(video_markup)\n dictionary['video_files_nrk'] = len(nrk_videoer)\n return", "def video_dict(self):\n self.cur.execute(\"SELECT video_ID, video_title FROM videos\")\n videos = {}\n video_titles = []\n for video in self.cur.fetchall():\n video_titles.append(video[1])\n videos.update({video[0] : video[1]})\n return videos, video_titles", "def keypoints_to_json(datum):\n jsonDict = dict()\n jsonDict[\"pose_keypoints_2d\"] = datum.poseKeypoints.tolist()\n if datum.faceKeypoints.size > 0 :\n jsonDict[\"face_keypoints_2d\"] = []\n else : \n jsonDict[\"face_keypoints_2d\"] = datum.faceKeypoints.tolist()\n jsonDict[\"hand_left_keypoints_2d\"] = datum.handKeypoints[0].tolist()\n jsonDict[\"hand_right_keypoints_2d\"] = datum.handKeypoints[1].tolist()\n return jsonDict", "def parse():\n all_players = list(FACE_IMAGE_LOCATIONS.keys())\n face_encodings = VideoParser.__load_faces_encodings(all_players)\n player_occurrences = VideoParser.__get_player_occurrences(all_players, face_encodings)\n VideoParser.__save_parsed_video(player_occurrences)", "def get_embed_dict(self):\n if not self.get_url() or not self.get_embed_url():\n return None\n \n output = {\n \"url\": self.get_url(),\n \"embed_url\": self.get_embed_url(),\n \"provider_url\": self.get_provider_url(),\n \"provider_name\": self.get_provider_name(),\n \"thumbnail_url\": self.get_thumbnail_url(),\n \"type\": \"video\"\n }\n if self.get_height():\n output['iframe_height'] = self.get_height()\n if self.get_width():\n output['iframe_width'] = self.get_width()\n\n return output", "def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'title': self.title,\n\t\t\t'tracknum': self.track_num,\n\t\t\t'video': self.video_id\n\t\t}", "def video_test():\n r = request\n # convert string of image data to uint8\n nparr = np.fromstring(r.data, np.uint8)\n # decode image\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # do some fancy processing here....\n\n # build a response dict to send back to client\n response = {'message': 'image received. 
size={}x{}'.format(img.shape[1], img.shape[0])\n }\n print(response)\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n cv2.imwrite(\"1.jpg\", img)\n print(\"done\")\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def video_feed(self):\r\n model.video.link(self.link)\r\n age_net, gender_net = model.video.caffe_models()\r\n return Response(model.video.video_detector(age_net, gender_net),mimetype='multipart/x-mixed-replace; boundary=frame')", "def creer_dictionnaire_vide():\n dico = {}\n return dico", "def video2():\n return Response(gen_frames(2),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def get_video_data(video_page_url):\n\tvideo_data = {}\n\tresponse = requests.get(video_page_url)\n\tsoup = bs4.BeautifulSoup(response.text)\n\tvideo_data['title'] = soup.select('title')[0].get_text()\n\t\n\t# careful with the encoding: otherwise it might fail on title like \"Dr med Schr(umlaut)fel Interview\" \n\t#print(u'\"{0}\"'.format(video_data['title']).encode('ascii', 'ignore'))\n\t\n\t# sometimes views are like \"42 views\" or \"2457\" with even CR/LF\n\ttry:\n\t\tvideo_data['views'] = int(re.sub('[^0-9]', '',\n\t soup.select('.watch-view-count')[0].get_text().split()[0]))\n\texcept:\n\t\t#print(\"Error fetching the view count for %s\" % video_data['title'].encode('ascii', 'ignore'))\n\t\tvideo_data['views'] = 0\n\n\t# sometimes likes / dislikes can be disabled...\n\tif soup.select('.likes-count'):\n\t\tvideo_data['likes'] = int(re.sub('[^0-9]', '',\n\t soup.select('.likes-count')[0].get_text().split()[0]))\n\t\tvideo_data['dislikes'] = int(re.sub('[^0-9]', '', \n\t soup.select('.dislikes-count')[0].get_text().split()[0]))\n\telse:\n\t\t#print(\"likes/dislikes not authorized for the video: %s\" % video_data['title'].encode('ascii', 'ignore'))\n\t\tvideo_data['likes'] = 0\n\t\tvideo_data['dislikes'] = 0\n\n\treturn video_data", "def fake_note_with_video_attachment():\n with open(\"tests/data/note_with_video_attach.json\") as f:\n return json.load(f)", "def send_video(vid, text='', chatID=chatID, token=token,time=10):\n url = f'https://api.telegram.org/bot{token}/sendVideo'\n com = f'curl -s -X POST {url}'\n com += f' -F chat_id={chatID} -F video=@{vid} -F caption=\"{text}\"'\n resp = os.popen(com).read().strip()\n return json.loads(resp)", "def voc2json():\n hyou_lesson = hyou_reader()\n mina1_lesson = mina1_reader()\n mina2_lesson = mina2_reader()\n\n lesson_list = hyou_lesson + mina1_lesson + mina2_lesson\n\n json_file = open(OUT_PATH, 'w')\n json_file.write(json.dumps(lesson_list, encoding='utf-8', ensure_ascii=False,\n indent=4, sort_keys=True))\n json_file.close()", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n pass\n video_infos.append(info) \n del ann_info\n\n return video_infos", "def save_video_data(self):\n if self.overwrite:\n # Erase 
old event videos\n for path in self.video_dir.glob('*.json'):\n path.unlink()\n for video in self.videos:\n video.save()", "def video_feed():\n return Response(gen(Camera()), mimetype='multipart/x-mixed-replace; boundary=frame')", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']+'.avi') \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']] \n else:\n if not self.test_ret:\n video_info['text'] = [rnd.choice(video_info['text'])]\n else:\n video_info['clip_text_candidate'] = list(range(len(video_info['text'])))\n\n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def video1():\n return Response(gen_frames(1),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def bb_gvideo(hit):\n video = hit.group(1)\n return '<object width=\"400\" height=\"326\"><param name=\"movie\" value=\"http://video.google.com/googleplayer.swf?docId=%s\"></param><param name=\"wmode\" value=\"transparent\"></param><embed src=\"http://video.google.com/googleplayer.swf?docId=%s\" wmode=\"transparent\" style=\"width:400px; height:326px;\" id=\"VideoPlayback\" type=\"application/x-shockwave-flash\" flashvars=\"\"></embed></object>' % ( video, video )", "def parse_video_tag(raw_video):\r\n if not raw_video:\r\n return None\r\n\r\n keystring_matcher = re.search(r'(?<=embed/)[a-zA-Z0-9_-]+', raw_video)\r\n if keystring_matcher is None:\r\n keystring_matcher = re.search(r'<?=\\d+:[a-zA-Z0-9_-]+', raw_video)\r\n\r\n if keystring_matcher:\r\n return keystring_matcher.group(0)\r\n else:\r\n logging.warn(\"ignoring the content because it doesn't not conform to expected pattern: \" + raw_video)\r\n return None" ]
[ "0.6351646", "0.62693363", "0.6039878", "0.5948034", "0.58440053", "0.58440053", "0.57589173", "0.57589173", "0.57279646", "0.5714623", "0.56589276", "0.56396013", "0.5506513", "0.54930335", "0.5426834", "0.5410226", "0.5388581", "0.53463376", "0.53265417", "0.5283384", "0.5244086", "0.52438414", "0.52379686", "0.5227579", "0.51954854", "0.5185518", "0.5179038", "0.5175231", "0.5170314", "0.51691926" ]
0.6927642
0
Writes the contents of the videoJson dictionary in JSON format under the name {id_gloss}_{id_instance}.json
def dict_to_json(videoJson, id_gloss, id_instance): path = "/home/nmiguens/JSON/WLASL" name = "{}_".format(id_gloss) + "{}.json".format(id_instance) with open(os.path.join(path, name), 'w') as fout: json.dump(videoJson , fout) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wrangle_video_record(json_obj: dict):\n entry_dict = {}\n for key, value in get_final_key_paths(\n json_obj, '', True, black_list=['localized', 'thumbnails'],\n final_keys_only=True):\n if key in video_keys_and_columns: # converting camelCase to underscore\n new_key = []\n for letter in key:\n if letter.isupper():\n new_key.append('_' + letter.lower())\n else:\n new_key.append(letter)\n key = ''.join(new_key)\n if key == 'relevant_topic_ids':\n value = list(set(value)) # due to duplicate parent topic ids\n elif key == 'duration':\n value = convert_duration(value)\n elif key == 'published_at':\n value = value.replace('T', ' ')\n elif key == 'actual_start_time':\n key = 'stream'\n value = 'true'\n elif key in ['view_count', 'dislike_count', 'like_count',\n 'comment_count']:\n value = int(value)\n entry_dict[key] = value\n\n return entry_dict", "def get_video():\n video = {}\n for k, v in DB.VIDEOS.iteritems():\n video[k] = v.__dict__\n return video", "def WLASL_parcours():\n path = \"/home/nmiguens/Datasets/WLASL\"\n write_txt(\"\\n\" + \"---------------------------- \\n\" + \"Nouveau processus \\n\")\n nb_video = 0\n with open(r\"{}\".format(path + \"/start_kit/WLASL_v0.3.json\"), \"r\") as read_file:\n WLASL = json.load(read_file)\n for glosses in WLASL:\n for instance in glosses[\"instances\"]:\n inputVideo = os.path.join(path, \"videos/\" + instance[\"video_id\"] +\".mp4\")\n if os.path.exists(inputVideo):\n if not os.path.exists(\"/home/nmiguens/JSON/WLASL/{}_{}.json\".format(glosses[\"gloss\"], instance[\"instance_id\"])):\n videoDict = video_to_dict(inputVideo, 0, -1) #instance[\"frame_start\"], instance[\"frame_end\"])\n dict_to_json(videoDict, glosses[\"gloss\"], instance[\"instance_id\"])\n nb_video = len(os.listdir(\"/home/nmiguens/JSON/WLASL\")) - nb_video \n message = \"{} vidéos traitées pour la classe {}\".format(nb_video, glosses[\"gloss\"])\n write_txt(message)\n return 0", "def __init__(self, json):\n\n self.id = json[\"id\"]\n self.alternateId = json[\"alternateId\"]\n\n if \"airDate\" in json:\n self.airDate = datetime.strptime(json[\"airDate\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"name\" in json:\n self.name = json[\"name\"]\n\n if \"title\" in json:\n self.title = json[\"title\"]\n\n if \"description\" in json:\n self.description = json[\"description\"]\n\n if \"episode\" in json:\n self.episode = json[\"episode\"]\n\n if \"episodeNumber\" in json:\n self.episodeNumber = json[\"episodeNumber\"]\n else:\n self.episodeNumber = None\n\n if \"season\" in json:\n self.season = json[\"season\"]\n\n if \"seasonNumber\" in json:\n self.seasonNumber = json[\"seasonNumber\"]\n else:\n self.seasonNumber = None\n\n if \"publishStart\" in json:\n self.publishStart = datetime.strptime(json[\"publishStart\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"publishEnd\" in json:\n self.publishEnd = datetime.strptime(json[\"publishEnd\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"videoDuration\" in json:\n self.videoDuration = timedelta(milliseconds=json[\"videoDuration\"])\n\n if \"isFreePlayable\" in json:\n self.isFreePlayable = json[\"isFreePlayable\"]\n\n if \"isPlayable\" in json:\n self.isPlayable = json[\"isPlayable\"]\n\n if \"isNew\" in json:\n self.isNew = json[\"isNew\"]\n\n if \"image\" in json:\n self.image = Image(json[\"image\"])", "def single_channel_video_data(self, limit=50, vid_part='snippet', output_path='./', chanlid=None):\n all_result = {}\n print(\"finding vidids: \", chanlid, \" : \", os.getpid())\n result = self.playlist([chanlid], limit)\n\n # print(\"playlist: \", result, \" : \", 
os.getpid())\n print(\"finding channel meta: \", chanlid, \" : \", os.getpid())\n all_result.update({chanlid: self.get_video_details(result[chanlid], part=vid_part)})\n print(\"doing json dump: \", chanlid, \" : \", os.getpid())\n\n lock.acquire()\n with open(output_path + 'new_family_parenting.json', \"a\") as out_fp:\n json.dump(all_result, out_fp)\n out_fp.write(\"\\n\")\n lock.release()", "def creer_dictionnaire_vide():\n dico = {}\n return dico", "def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'title': self.title,\n\t\t\t'tracknum': self.track_num,\n\t\t\t'video': self.video_id\n\t\t}", "def json_friendly(self):", "def create_output():\n\n input_data = \"{}/{}.json\".format(TRANSCRIPTS_VIDEOS_PATH, request.form[\"name\"])\n duration = \"0,{}\".format(int(float(request.form[\"duration\"])))\n movie = \"{}/{}\".format(VIDEOS_PATH, request.form[\"movie\"]) # videos/movie.mp4\n movie_data = \"{}/{}.json\".format(TRANSCRIPTS_VIDEOS_PATH, request.form[\"movie\"]) # transcripts/movie.mp4.json\n\n scene = make_scene(OUTPUT_VIDEOS_PATH, input_data, duration, movie, movie_data, True)\n return jsonify(status=\"200\", scene=scene)", "def __json_encode__(self) -> Dict[str, Any]:\n return {\"figure\": self.figure, \"name\": self.name, \"metadata\": self.metadata}", "def __init__(self, json_data, save_path, video_fname):\n self.synced = json_data.get('MatchedBy') == \"moviehash\"\n self.movie_name = json_data.get('MovieName', \"\")\n self.episode_num = json_data.get('SeriesEpisode')\n self.season_num = json_data.get('SeriesSeason')\n self.download_link = json_data.get('SubDownloadLink')\n self.download_count = int(json_data.get('SubDownloadsCnt', -1))\n self.sub_format = json_data.get('SubFormat')\n self.sub_filename = json_data.get('SubFileName')\n self.save_path = save_path\n self.full_path = \"{folder}{name}.{format}\".format(\n folder=self.save_path,\n name=video_fname,\n format=self.sub_format)", "def voc2json():\n hyou_lesson = hyou_reader()\n mina1_lesson = mina1_reader()\n mina2_lesson = mina2_reader()\n\n lesson_list = hyou_lesson + mina1_lesson + mina2_lesson\n\n json_file = open(OUT_PATH, 'w')\n json_file.write(json.dumps(lesson_list, encoding='utf-8', ensure_ascii=False,\n indent=4, sort_keys=True))\n json_file.close()", "def get_json_string(item):\n return json.dumps(item.json if isinstance(item, Instance) else item)", "def voc2json():\n words_count = 0\n word_list = []\n lesson_list = []\n\n reg_word = re.compile(r\"[0-9]+\\.\\s*([a-zA-Z\\S]+)\")\n voc_dict = voc_reader()\n\n with open(LESSON_PATH, 'r') as word_file:\n\n for line in word_file:\n line.strip()\n line = line.replace(\"\\xef\", \" \")\n line = line.replace(\"|\", \" \")\n word_match = reg_word.match(line)\n if not word_match:\n continue\n word_group = word_match.group(1)\n if word_group not in voc_dict:\n continue\n\n words_count += words_count + 1\n\n word_list.append({\n \"Type\": \"\",\n \"Voc\": word_group,\n \"Ext\": voc_dict[word_group][0],\n \"Meaning\": voc_dict[word_group][1],\n \"Time\": 0\n })\n\n if len(word_list) >= MAX_WORD_COUNT:\n lesson_list.append(word_list)\n word_list = []\n\n lesson_len = len(word_list)\n if lesson_len > 0:\n lesson_list.append(word_list)\n\n print(words_count)\n\n json_save(OUT_PATH, lesson_list)", "def __init__(self, json):\n\n self.id = json[\"id\"]\n self.alternateId = json[\"alternateId\"]\n self.name = json[\"name\"]\n\n if \"description\" in json:\n self.description = json[\"description\"]\n\n if \"episodeCount\" in json:\n self.episodeCount = 
json[\"episodeCount\"]\n\n if \"seasonNumbers\" in json:\n self.seasonNumbers = json[\"seasonNumbers\"]\n\n if \"image\" in json:\n self.image = Image(json[\"image\"])", "def to_json(self):\n template = {\n \"tensorName\": self.title,\n \"tensorShape\": list(self.vector_shape),\n \"tensorPath\": self.vector_url,\n \"metadataPath\": self.metadata_url,\n }\n if self.sprite_url is not None:\n template[\"sprite\"] = {\n \"imagePath\": self.sprite_url,\n \"singleImageDim\": list(self.image_size),\n }\n return template", "def _jsonable(self):\n magic_dict = {}\n mman = self.magics_manager\n magics = mman.lsmagic()\n for key, subdict in magics.items():\n d = {}\n magic_dict[key] = d\n for name, obj in subdict.items():\n try:\n classname = obj.__self__.__class__.__name__\n except AttributeError:\n classname = 'Other'\n \n d[name] = classname\n return magic_dict", "def json_format(data):\n return {\n 'Title': data[\"title\"],\n 'Publication date': data['pubDate'],\n 'News link': data['link'],\n 'Image link': data['media'],\n }", "def write_in_json(data):\n with open('genre.json', 'w') as data_file:\n json.dump(data, data_file, indent= 4)", "def standalize_yt2t(input_file):\n logger.info('Reading file: %s', input_file)\n lines = [line.rstrip('\\n') for line in open(input_file)]\n lines = [line.split('\\t') for line in lines]\n\n logger.info('Building caption dictionary for each video key')\n video_ids = []\n capdict = {}\n for line in lines:\n video_id = line[0]\n if video_id in capdict:\n capdict[video_id].append(line[1])\n else:\n capdict[video_id] = [line[1]]\n video_ids.append(video_id)\n\n # create the json blob\n videos = []\n captions = []\n counter = itertools.count()\n for video_id in video_ids:\n\n vid = int(video_id[3:])\n jvid = {}\n jvid['category'] = 'unknown'\n jvid['video_id'] = video_id\n jvid['id'] = vid\n jvid['start_time'] = -1\n jvid['end_time'] = -1\n jvid['url'] = ''\n videos.append(jvid)\n\n for caption in capdict[video_id]:\n jcap = {}\n jcap['id'] = next(counter)\n jcap['video_id'] = vid\n jcap['caption'] = unicode(caption, errors='ignore')\n captions.append(jcap)\n\n out = {}\n out['info'] = {}\n out['videos'] = videos\n out['captions'] = captions\n\n return out", "def render_dictionary(self): \n asset_json = {\n 'name': self.name,\n 'product_name': self.product_name,\n 'product_vendor': self.product_vendor,\n 'configuration': self.configuration,\n 'description': self.description,\n 'primary_users': self.primary_users,\n 'primary_voting': self.primary_voting,\n 'secondary_users': self.secondary_users,\n 'secondary_voting': self.secondary_voting,\n 'tags': self.tags,\n 'type': self.asset_type,\n 'action_whitelist': self.action_whitelist\n }\n\n if self.ingest_container_label:\n asset_json['ingest'] = {\n 'container_label': self.ingest_container_label,\n 'interval_mins': self.ingest_interval_mins,\n 'poll': self.ingest_poll,\n 'start_time_epoch_utc': self.ingest_start_time\n }\n\n return asset_json", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def route_video_details(id_title):\n\n result = video_dal_retriever.retrieve_details(id_title)\n return jsonify({'details' : result})", "def __init__(self, json):\n\n if \"show\" not in json or \"videos\" not in json:\n raise Exception(\"Invalid JSON.\")\n\n self.show = Show(json[\"show\"])\n self.seasons = []\n for seasonNumber in self.show.seasonNumbers:\n try:\n season_json = json[\"videos\"][\"episode\"][str(seasonNumber)]\n except KeyError:\n continue\n 
self.seasons.append(Season(seasonNumber, season_json))\n\n self.specials = []\n if \"standalone\" in json[\"videos\"]:\n for special in json[\"videos\"][\"standalone\"]:\n self.specials.append(Episode(special))", "def to_dict(self):\n# \"\"\" The JSON model used is like:\n# <code>\n#{\n# \"duration\": 15,\n# \"url\": \"url1\",\n# \"selections\": [{\n# \"annotations\": [{\n# \"author\": \"\",\n# \"description\": \"speaker\",\n# \"keyword\": \"john\",\n# \"lang\": \"EN\"\n# },\n# {\n# \"author\": \"\",\n# \"description\": \"speakerLabel\",\n# \"keyword\": \"S0\",\n# \"lang\": \"EN\"\n# }\n# , {\n# \"author\": \"\",\n# \"description\": \"gender\",\n# \"keyword\": \"F\",\n# \"lang\": \"EN\" \n# }],\n# \"resolution\": \"0x0\",\n# \"selW\": 20,\n# \"selH\": 15,\n# \"selY\": 10,\n# \"selX\": 10,\n# \"startTime\" : 0,\n# \"endTime\" : 10\n# \n# }]\n#}\n# </code>\n# \n# \"\"\"\n\n dic = {\"duration\": self.get_duration(),\n \"url\": self._filename,\n \"db\":self.get_db().get_path(),\n \"selections\": [] }\n for seg in self.get_time_slices():\n dic['selections'].append({\n \"startTime\": float(seg[0]) / 100.0,\n \"endTime\": float(seg[1]) / 100.0,\n 'speaker': seg[-2],\n 'speakerLabel': seg[-1],\n 'gender': seg[2],\n 'speakers': seg[3]\n })\n return dic", "def __cleaned(game, json):\n link = game.link\n title = \" - \".join(map(lambda x: x.strip(), game.title.split('-')))\n date = datetime.datetime(*game.updated_parsed[:6])\n\n # convert date to timestamp\n if json:\n date = date.strftime('%a, %d %b %Y')\n \n return {'title': title, 'link': link, 'date': date}", "def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robot_dict) + \",\\n\"\n json_str = json_str + '\"target_obj\" : ' + json.dumps(target_dict) + \"\\n\"\n json_str = json_str + '}'\n return(json_str)", "def to_json(self):\r\n\r\n object_json = dict()\r\n object_json[\"Type\"] = self.__class__.__name__\r\n game_json = dict()\r\n game_json[\"x_dist\"] = self.x_dist\r\n game_json[\"y_dist\"] = self.y_dist\r\n game_json[\"turn_number\"] = self.turn_number\r\n game_json[\"max_turns\"] = self.max_turns\r\n game_json[\"num_to_win\"] = self.num_to_win\r\n game_json[\"winner\"] = self.winner\r\n game_json[\"board\"] = self.board.to_json()\r\n game_json[\"board_history\"] = [board.to_json() for board in self.board_history]\r\n game_json[\"players\"] = [player.to_json() for player in self.players]\r\n object_json[\"Object\"] = game_json\r\n\r\n return json.dumps(object_json)", "def create_json(self):\n data = {\"image_id\": self.ids, \"img_path\": self.img_paths, \"bg\": self.bgs}\n if hasattr(self, \"bbox\"):\n data[\"bbox\"] = self.bbox\n if hasattr(self, \"masks\"):\n data[\"masks\"] = self.masks\n with open(f\"{self.save_path}{self.name}/json/images_info.json\", \"w\") as f:\n json.dump(data, f)", "def save_game_encours(partie):\n fichier= open(\"contgame.json\",\"w\")\n json.dump(partie,fichier)\n fichier.close()" ]
[ "0.6025468", "0.5868325", "0.57273495", "0.56607246", "0.56546223", "0.5646203", "0.56321543", "0.56124234", "0.55609107", "0.5533504", "0.55251294", "0.55178976", "0.5456793", "0.54222846", "0.54157853", "0.54101205", "0.53957343", "0.5393648", "0.539035", "0.53855884", "0.5362167", "0.5340885", "0.53048027", "0.529652", "0.52864647", "0.5275069", "0.5269677", "0.526934", "0.5263077", "0.52580196" ]
0.7464959
0
Creates instance of DungeonMap using id of Dungeon and calls function to update rooms. Should only be used ONCE
def _update_map(self): # Creates an instance of DungeonMap using the id of Dungeon self._map = DungeonMap(self._id) # Calls function to update rooms self._map._update_rooms()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_map(self) -> GameMap:\n player = self.engine.player\n dungeon = GameMap(\n self.engine, self.map_width, self.map_height, entities=[player]\n )\n\n player.place(int(self.map_width / 2), int(self.map_height / 2), dungeon)\n\n start_pos = (int(self.map_width / 2), int(self.map_height / 2))\n\n total_tiles = self.map_width * self.map_height\n desired_tiles = int(total_tiles * self.floor_percent)\n digger_count = 0\n\n floor_number = len(\n [\n (x, y)\n for x in range(0, self.map_width - 1)\n for y in range(0, self.map_height - 1)\n if dungeon.tiles[x, y] == tile_types.floor\n ]\n )\n\n while floor_number < desired_tiles:\n if self.spawn_mode == \"Random\":\n if digger_count == 0:\n drunk_x = start_pos[0]\n drunk_y = start_pos[1]\n else:\n drunk_x = self.engine.rng.integers(1, self.map_width - 1)\n drunk_y = self.engine.rng.integers(1, self.map_height - 1)\n else:\n drunk_x = start_pos[0]\n drunk_y = start_pos[1]\n\n drunk_life = 400\n\n while drunk_life > 0:\n dungeon.tiles[drunk_x, drunk_y] = tile_types.floor\n\n stagger_direction = self.engine.rng.integers(0, 4)\n if stagger_direction == 0 and drunk_x > 1:\n drunk_x -= 1\n elif stagger_direction == 1 and drunk_x < self.map_width - 2:\n drunk_x += 1\n elif stagger_direction == 2 and drunk_y > 1:\n drunk_y -= 1\n elif stagger_direction == 3 and drunk_y < self.map_height - 2:\n drunk_y += 1\n\n drunk_life -= 1\n\n digger_count += 1\n floor_number = len(\n [\n (x, y)\n for x in range(0, self.map_width - 1)\n for y in range(0, self.map_height - 1)\n if dungeon.tiles[x, y] == tile_types.floor\n ]\n )\n\n dijk_map = generate_dijkstra_map(dungeon, (player.x, player.y))\n exit_tile = exit_from_dijk(dungeon, dijk_map, cull_unreachable=True)\n\n dungeon.tiles[exit_tile] = tile_types.down_stairs\n dungeon.downstairs = exit_tile\n\n regions = generate_voronoi_regions(dungeon)\n\n for region in regions:\n if len(region) > 0:\n place_entities(region, dungeon, self.engine.game_world.current_floor)\n\n return dungeon", "def make_map(player, dungeon_level):\n new_map = map.Map(config.MAP_HEIGHT, config.MAP_WIDTH, dungeon_level)\n new_map.objects.append(player)\n player.current_map = new_map\n player.camera_position = algebra.Location(0, 0)\n new_map.random_seed = libtcod.random_save(0)\n _build_map(new_map)\n for new_room in new_map.rooms:\n _place_objects(new_map, new_room, player)\n player.pos = new_map.rooms[0].center()\n\n new_map.initialize_fov()\n return new_map", "def create_map(json_game_map):\n room_hash = {}\n\n for room in constants.ROOMS:\n # Set name, description, and neighbors\n room_hash[room] = Room.Room()\n room_hash[room].set_name(room)\n room_hash[room].set_short_description(constants.ROOMS[room]['short_description'])\n room_hash[room].set_long_description(constants.ROOMS[room]['long_description'])\n room_hash[room].set_north(constants.ROOMS[room]['north'])\n room_hash[room].set_south(constants.ROOMS[room]['south'])\n room_hash[room].set_east(constants.ROOMS[room]['east'])\n room_hash[room].set_west(constants.ROOMS[room]['west'])\n room_hash[room].set_locked(constants.ROOMS[room]['locked'])\n\n # Set features in the room\n for feature in constants.ROOMS[room]['features']:\n new_feature = Feature.Feature()\n new_feature.set_name(constants.ROOMS[room]['features'][feature]['name'])\n new_feature.set_description(constants.ROOMS[room]['features'][feature]['description'])\n room_hash[room].add_feature(new_feature)\n\n # If it is not a loaded game\n if not json_game_map:\n # Set items in the room\n for item in 
constants.ROOMS[room]['items']:\n new_item = Item.Item()\n new_item.set_name(constants.ROOMS[room]['items'][item]['name'])\n new_item.set_description(constants.ROOMS[room]['items'][item]['description'])\n if \"hidden\" in constants.ROOMS[room]['items'][item]:\n if constants.ROOMS[room]['items'][item][\"hidden\"] == \"true\":\n new_item.set_hidden(True)\n room_hash[room].add_item(new_item)\n \n #Set monsters in the room\n for monster in constants.ROOMS[room]['monsters']:\n if constants.ROOMS[room]['monsters'] != \"None\":\n new_monster = Monster.Monster()\n new_monster.set_name(constants.ROOMS[room]['monsters'][monster]['name'])\n new_monster.set_lvl(constants.ROOMS[room]['monsters'][monster]['lvl'])\n new_monster.set_description(constants.ROOMS[room]['monsters'][monster]['description'])\n room_hash[room].add_monster(new_monster)\n\n # If it is a loaded game\n else:\n # Set items in the room\n for item in json_game_map[room]:\n if item == \"visited\":\n room_hash[room].set_visited(json_game_map[room][item])\n elif item == \"locked\":\n room_hash[room].set_locked(json_game_map[room][item])\n #Set undefeated monster in the room\n elif item == \"Lich\":\n new_monster = Monster.Monster()\n new_monster.set_name(json_game_map[room][item]['Name'])\n new_monster.set_lvl(json_game_map[room][item]['Lvl'])\n new_monster.set_description(json_game_map[room][item]['Description'])\n new_monster.set_defeated_status(json_game_map[room][item]['Defeated'])\n room_hash[room].add_monster(new_monster)\n elif item == \"Armored Skeleton\":\n new_monster = Monster.Monster()\n new_monster.set_name(json_game_map[room][item]['Name'])\n new_monster.set_lvl(json_game_map[room][item]['Lvl'])\n new_monster.set_description(json_game_map[room][item]['Description'])\n new_monster.set_defeated_status(json_game_map[room][item]['Defeated'])\n room_hash[room].add_monster(new_monster)\n elif item == \"Animated Armor\":\n new_monster = Monster.Monster()\n new_monster.set_name(json_game_map[room][item]['Name'])\n new_monster.set_lvl(json_game_map[room][item]['Lvl'])\n new_monster.set_description(json_game_map[room][item]['Description'])\n new_monster.set_defeated_status(json_game_map[room][item]['Defeated'])\n room_hash[room].add_monster(new_monster)\n elif item == \"Skeleton\":\n new_monster = Monster.Monster()\n new_monster.set_name(json_game_map[room][item]['Name'])\n new_monster.set_lvl(json_game_map[room][item]['Lvl'])\n new_monster.set_description(json_game_map[room][item]['Description'])\n new_monster.set_defeated_status(json_game_map[room][item]['Defeated'])\n room_hash[room].add_monster(new_monster)\n elif item == \"Features\":\n for feature in json_game_map[room][item]:\n room_hash[room].get_features()[feature].set_interacted_with(json_game_map[room][item][feature]['Interacted With'])\n else:\n new_item = Item.Item()\n new_item.set_name(json_game_map[room][item]['Name'])\n new_item.set_description(json_game_map[room][item]['Description'])\n if \"Hidden\" in json_game_map[room][item]:\n if json_game_map[room][item][\"Hidden\"]:\n new_item.set_hidden(True)\n room_hash[room].add_item(new_item)\n\n return room_hash", "def create_room(room):\n global map\n for x in range(room.x1+1, room.x2):\n for y in range(room.y1+1, room.y2):\n map[x][y].blocked = False\n map[x][y].block_sight = False", "def _create_room(new_map, room):\n for x in range(room.x1 + 1, room.x2):\n for y in range(room.y1 + 1, room.y2):\n new_map.terrain[x][y] = 1", "def new_map(self):\n self.map = Map()\n self.player.roomId = 0\n return self.map", "def 
generate(self, level):\n # TODO The dungeon's instances are spawned and loaded here.\n # fill map with \"blocked\" tiles\n level.maze = [[Tile(x, y, True) for y in range(level.height)] for x in range(level.width)]\n\n for r in range(level.max_rooms):\n # random width and height\n w = random.randint(level.min_room_size, level.max_room_size)\n h = random.randint(level.min_room_size, level.max_room_size)\n\n # random position without going out of the boundaries of the map\n x = random.randint(0, level.width - w - 1)\n y = random.randint(0, level.height - h - 1)\n\n # \"DungeonRoom\" class makes rectangles easier to work with\n new_room = Room(x, y, w, h)\n level.rooms.append(new_room)\n\n # run through the other rooms and see if they intersect with this one\n failed = False\n for other_room in level.rooms:\n if other_room is not new_room and new_room.intersect(other_room):\n failed = True\n break\n\n if not failed:\n # this means there are no intersections, so this room is valid\n\n # \"paint\" it to the map's tiles\n self._create_room(level, new_room)\n\n # center coordinates of new room, will be useful later\n new_x, new_y = new_room.center()\n\n if level.num_rooms > 0:\n # connect it to the previous room with a tunnel\n # center coordinates of previous room\n (prev_x, prev_y) = level.rooms[level.num_rooms - 1].center()\n\n # draw a coin (random number that is either 0 or 1)\n if random.randint(0, 1) == 1:\n # first move horizontally, then vertically\n self._create_h_tunnel(level, prev_x, new_x, prev_y)\n self._create_v_tunnel(level, prev_y, new_y, new_x)\n else:\n # first move vertically, then horizontally\n self._create_v_tunnel(level, prev_y, new_y, prev_x)\n self._create_h_tunnel(level, prev_x, new_x, new_y)\n\n # finally, append the new room to the list\n level.rooms.append(new_room)\n level.num_rooms += 1\n\n # connect them with a tunnel\n self._create_h_tunnel(level, 25, 55, 23)", "def make_dungeon(self):\r\n # Total number of rooms\r\n n = self.__nx * self.__ny\r\n room_stack = []\r\n current_room = self.room_at(self.__ix, self.__iy)\r\n # Total number of visited rooms during maze construction\r\n nv = 1\r\n\r\n # iterate over all rooms of dungeon\r\n while nv < n:\r\n neighbors = self.find_neighbors(current_room)\r\n\r\n if not neighbors:\r\n # We've reached a dead end: backtrack.\r\n current_room = room_stack.pop()\r\n continue\r\n\r\n # Choose a random neighboring room and move to it\r\n direction, next_room = random.choice(neighbors)\r\n current_room.connect(next_room, direction)\r\n room_stack.append(current_room)\r\n current_room = next_room\r\n nv += 1", "def remap_ids(self, id_map: Dict[int, int]) -> None:\n super().remap_ids(id_map)\n self.door = id_map.get(self.door, 0)", "def add_room(self, data):\n room_id = data['room_id']\n x, y = literal_eval(data['coordinates'])\n room_data = {'id': data['room_id'],\n 'title': data['title'],\n 'description' : data['description'],\n 'coordinates': literal_eval(data['coordinates']),\n 'elevation': data['elevation'],\n 'terrain': data['terrain'],\n 'exits' : {direction: '?' 
for direction in data['exits']}\n }\n self.rooms.setdefault(room_id, room_data)", "def clear_map(self):\n self.rooms = []\n\n self.dungeon.clear_dungeon()", "def GetRoom(self, id):\n try:\n return self._rooms[id]\n except:\n return None", "def getRoomById(self, id):\n for room in self.rooms:\n if room.id == id:\n return room\n\n return None", "def room(roomid):\n if db.checkCache(roomid):\n data = db.showCache(roomid)\n get_events(data[\"info\"],None,None)\n resp = jsonify(data)\n resp.status_code = 200\n else:\n try:\n r = requests.get(FenixSpacesAPI_URL + \"/\" + str(roomid))\n data = r.json()\n\n if(data['type'] != 'ROOM'):\n resp = jsonify(\"Not Found\")\n resp.status_code = 404\n\n else:\n data = format_room(data)\n db.add(roomid, data)\n get_events(data[\"info\"],None,None)\n resp = jsonify(data)\n resp.status_code = 200\n\n except Exception as e:\n print(e)\n resp = jsonify(\"Unsuccess\")\n resp.status_code = 400\n\n return resp", "def get_room_by_id(self, id):\n if not isinstance(id, int):\n id = int(id)\n if self.rooms.has_key(id):\n return self.rooms[id]\n raise RuntimeError, \"Room not known\"", "def create_dungeon(self):\n self.walk_iterations = max(self.walk_iterations, (self.width * self.height * 10))\n self._tiles_filled = 0\n self._prev_direction = None\n\n self.drunkard_x = randint(2, self.width - 2)\n self.drunkard_y = randint(2, self.height - 2)\n self.tiles_goal = self.width * self.height * self._percent_goal\n\n for i in range(self.walk_iterations):\n self.walk()\n if self._tiles_filled >= self.tiles_goal:\n break\n\n self.scan_for_zones()", "def __init__(self, width, height):\n roomDict = {}\n for w in range(width):\n for h in range(height):\n roomDict[Position(w, h)] = 'dirty'\n self.tiles = roomDict\n self.width = width\n self.height = height", "def load_rooms(self, filename):\n # First we parse all the data we need to create the rooms with.\n # All parsed lines of data are saved to rooms_data.\n rooms_data = []\n with open(filename, \"r\") as f:\n room_data = []\n for line in f:\n # When there is no blank newline it means there's still data.\n if not line == \"\\n\":\n room_data.append(line.strip())\n # A blank newline signals all data of a single room is parsed.\n else:\n rooms_data.append(room_data)\n room_data = []\n # Append a final time, because the files do not end on a blank newline.\n rooms_data.append(room_data)\n\n # Create room objects for each set of data we just parsed.\n rooms = {}\n for room_data in rooms_data:\n id = int(room_data[0])\n name = room_data[1]\n description = room_data[2]\n\n # Initialize a room object and put it in a dictionary with its\n # id as key.\n room = Room(id, name, description)\n rooms[id] = room\n\n # Add routes to each room we've created with the data from each set\n # we have parsed earlier.\n for room_data in rooms_data:\n id = int(room_data[0])\n # We split to connections into a direction and a room_id.\n connections = room_data[4:]\n connections = [connection.split() for connection in connections]\n # Here we get the current room object that we'll add routes to.\n room = rooms[id]\n for connection, target_room_id in connections:\n # TODO add routes to a room (hint: use the add route method)\n # split id and item\n idanditem = target_room_id.split('/', 1)\n if len(idanditem) < 2:\n room.add_route(connection, target_room_id)\n else:\n room.add_route(connection, idanditem[0], idanditem[1])\n rooms[id] = room\n\n return rooms", "def change_map(self, depth, *args, **kwargs):\n #Unload\n 
self.maplist[self.current_map_idx].on_unload()\n self.maplist[self.current_map_idx]._entities.remove(self._player)\n\n #Generate new maps if necessary\n while depth >= len(self.maplist):\n new_map = self.create_new_map()\n self.maplist.append(new_map)\n\n #Switch to the new map\n self.current_map_idx = depth\n self.load_map(self.maplist[self.current_map_idx])", "async def _build_room_entry(self, room_id: str) -> JsonDict:\n stats = await self._store.get_room_with_stats(room_id)\n\n # currently this should be impossible because we call\n # check_user_in_room_or_world_readable on the room before we get here, so\n # there should always be an entry\n assert stats is not None, \"unable to retrieve stats for %s\" % (room_id,)\n\n current_state_ids = await self._store.get_current_state_ids(room_id)\n create_event = await self._store.get_event(\n current_state_ids[(EventTypes.Create, \"\")]\n )\n\n # TODO: update once MSC1772 lands\n room_type = create_event.content.get(EventContentFields.ROOM_TYPE)\n if not room_type:\n room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE)\n\n room_version = await self._store.get_room_version(room_id)\n allowed_spaces = None\n if await self._event_auth_handler.has_restricted_join_rules(\n current_state_ids, room_version\n ):\n allowed_spaces = await self._event_auth_handler.get_spaces_that_allow_join(\n current_state_ids\n )\n\n entry = {\n \"room_id\": stats[\"room_id\"],\n \"name\": stats[\"name\"],\n \"topic\": stats[\"topic\"],\n \"canonical_alias\": stats[\"canonical_alias\"],\n \"num_joined_members\": stats[\"joined_members\"],\n \"avatar_url\": stats[\"avatar\"],\n \"world_readable\": (\n stats[\"history_visibility\"] == HistoryVisibility.WORLD_READABLE\n ),\n \"guest_can_join\": stats[\"guest_access\"] == \"can_join\",\n \"creation_ts\": create_event.origin_server_ts,\n \"room_type\": room_type,\n \"allowed_spaces\": allowed_spaces,\n }\n\n # Filter out Nones – rather omit the field altogether\n room_entry = {k: v for k, v in entry.items() if v is not None}\n\n return room_entry", "def for_room(self, room_id):\n if not isinstance(room_id, str):\n raise TypeError('Room ID must be a string')\n\n self.for_tag('roomId:{}'.format(room_id))\n\n return self", "def create_map(self) -> pygame.sprite.Sprite:\n topleft = 50, 50\n bottomright = 500, 300\n f = TestFloor(topleft, bottomright, s.BROWN)\n\n p0 = Vec2d(topleft)\n p1 = p0 + Vec2d(bottomright)\n self.level_borders_ids.update(\n LevelBorders(s.flip_y(p0), s.flip_y(p1),\n space=self.main_loop.space,\n d=s.LEVEL_BORDERS_THICKNESS).get_ids\n )\n\n return f", "def create_new_map(self):\n return GameMap(self, mapgenfuncs.empty_box, width=self.width, height=self.height)", "async def create_room(self, location_id: str, data: dict):\r\n return await self.post(API_ROOMS.format(location_id=location_id), data)", "def create_map(width, height, pixels):\n\n\n\n\n def index_to_xy(i, width, height):\n \"\"\" Takes 0 based index going line wise from top\n left to bottom right, returns x, y coordinates so\n that 0,0 is on bottom left corner\n \"\"\"\n x = i % width\n y = i // width\n y*= -1\n y+= height - 1\n return (x,y)\n\n def place_terrain(type, i):\n \"\"\"This won't return anything, just do side effects\n\n The object \"gameLogic\" is used to place the object\n initially. It doesn't matter where this object is,\n as long as it exists. 
There must be an easier way,\n but this works.\n \"\"\"\n x,y = index_to_xy(i, width, height)\n\n object_name = terrain_types.get(type, \"water\")\n\n if ob[\"fast_create\"] > 0 and not (x%ob[\"fast_create\"] == 0 and y%ob[\"fast_create\"] == 0):\n return\n\n if object_name != \"water\":\n object = scene.addObject(object_name, \"gameLogic\")\n object.worldPosition = (x,y,0)\n\n\n list(map( (lambda tup : place_terrain(tup[1], tup[0])), list(enumerate(pixels)) ))", "def create_door(dungeon, room, pos, symbol, destination):\n dungeon[room][pos[0]][pos[1]] = [symbol, destination]", "def new_room(self):\r\n return Room()", "def get_room(room_id):\n try:\n room_id = int(room_id)\n room_entry = read_criteria(Room,{\"id\":room_id},session)\n except ValueError:\n room_entry = None\n # if the provided id doesn't match any room in the db, return -1 to indicate not found\n if room_entry is None:\n room = {\"roomId\":-1}\n status_code = 404\n else:\n status_code = 200\n room = room_json(room_entry, session,app.config[\"OFFLINE_TESTING\"], login_session)\n return generate_response(room,status_code)", "def update_monster(self):\n\n\t\t# if nothing else gets added to this (no other changes to update) you could delete\n\t\t# this function and simply call self.choose_guard() in its place\n\t\tself.guarded_area = self.choose_guard()", "def map_data(cult):\n try: # Map already exists\n underworld_model = Underworld.objects.get(owner=cult)\n field = generate_map(underworld_model.seed)\n except Underworld.DoesNotExist: # Generate new map\n # Create a new random seed every time we create an Underworld map\n seed = ''.join(random.choice(ascii_letters + digits) for _ in range(32))\n field = generate_map(seed)\n underworld_model = Underworld(owner=cult, seed=seed, x=field['x'], y=field['y'], time=0)\n underworld_model.save()\n\n print('### ########################### Seed used: ' + underworld_model.seed)\n \n return underworld_model, field" ]
[ "0.6638245", "0.66051424", "0.6174843", "0.6125215", "0.6114841", "0.60952884", "0.60184324", "0.5820419", "0.5677035", "0.5667187", "0.560798", "0.55109024", "0.5457504", "0.5321221", "0.5276006", "0.52715224", "0.5267841", "0.52529025", "0.5226872", "0.52195966", "0.5119134", "0.50558394", "0.5053132", "0.50442654", "0.50369334", "0.50365806", "0.49819148", "0.49699828", "0.49689242", "0.49574077" ]
0.8780594
0
Tests the home page.
def test_home(self): response = self.client.get('/') self.assertContains(response, 'Home Page', 1, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_home(self):\n response = self.client.get('/')\n self.assertContains(response, 'Home Page', 1, 200)", "def test_home_page(self):\r\n url = reverse('home')\r\n response = self.client.get(url)\r\n\r\n self.assertEqual(response.status_code, 200)", "def test_home_page(self):\n\n self.browser.get('http://localhost:8000/index.html')\n\n # there is a page title defined by <title></title> on the home page\n # check it\n\n self.assertIn('Stability within Movement',self.browser.title)\n\n # You will have an image for your home page I am assuming.\n # Put the name of your image here in place of homebrew.png\n # In general this is how we check for images on a page.\n\n # The user sees an image of sun hitting the Washington Monument\n\n m=self.browser.find_element_by_tag_name('img')\n self.assertIn('help.jpg',m.get_attribute('src'))\n\n a=self.browser.find_element_by_id('sun')\n a.click()\n\n self.assertIn('sun',self.browser.title)\n\n h=self.browser.find_element_by_tag_name('h1')\n\n m=self.browser.find_element_by_tag_name('img')\n\n # the user goes back to the home page\n # self.browser.back()\n self.browser.get('http://localhost:8000/index.html')\n\n # the user sees at the bottom of the page a link to credits\n l=self.browser.find_element_by_link_text('Credits')\n\n # the user clicks on the credits link\n l.click()\n # and sees the credits.html page\n a=self.browser.current_url\n self.assertIn(\"credits.html\",a)", "def test_home(self):\n\n response = self.client.get(reverse('home'))\n\n assert response.status_code == 200", "def test_home(self):\n response = self.app.get(\"/\")\n self.assertTrue(response.status_code, 200)", "def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)", "def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)", "def test_homepage(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Homepage\", result.data)", "def test_homepage(self):\n rc = self.app.get('/')\n assert b'Welcome to Code TA' in rc.data\n assert b'Logout' not in rc.data", "def test_home(self):\n result = self.app.get('/')\n self.assertEqual(result.status_code, 200)", "def test_home(self):\n response = self.client.get('/')\n self.assert_200(response)\n self.assert_template_used('index.html')", "def test_given_home_page_behavior(self):\n res = self.client().get('/')\n self.assertEqual(res.status_code, 200)\n json_res = json.loads(res.get_data(as_text=True))\n self.assertEqual('Home page', json_res['message'])", "def test_homepage(self):\r\n\r\n result = self.client.get(\"/\")\r\n self.assertIn(b\"Welcome!\", result.data)", "def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data", "def test_home(self):\n\n with self.client:\n result = self.client.get('/users')\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h1 class=\"col-2\">Users</h1>', result.data)", "def test_home_exists(self):\n response = self.app.get('/')\n self.assertEqual(response.status_code, 200)", "def test_homepage(self):\n \n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"What type of user are you?\", result.data)", "def test_homepage(self):\n\n response = self.client.get(\"/\")\n self.assertIn(\"Books</title>\", response.data)\n self.assertIn(\"Goodreads ID\", response.data)", "def test_home_content(self):\n bs = self.get_soup(baseUrl)\n 
self.assertOneExists(bs, \"#page_discover\")", "def test_home(self):\n self.selenium.get('{}/'.format(self.live_server_url))", "def test_template_home(self):\n self.assertTemplateUsed(self.response, 'index.html')", "def test_index(self):\n response = self.client.get('')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'home/index.html')", "def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)", "def test_if_home_is_successful(client):\n\n url = reverse(\"home\")\n response = client.get(url)\n assert response.status_code == 200", "def test_homepage_render(self):\n\n result = self.client.get(\"/\")\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)", "def test_home(client):\n rv = client.get('/')\n assert 200 == rv.status_code", "def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template", "def test_view_home(self):\n testUser = User.objects.create_user(username=\"testUser\", email = \"[email protected]\", password=\"testPassword\")\n uA = create_user(user=testUser, first_name=\"John\", last_name=\"Doe\", major='', bio='')\n login = self.client.force_login(testUser)\n url = reverse('login:home')\n response = self.client.get(url, follow=True)\n self.assertContains(response, \"Are you ready\")", "def test_home(self):\n res = self.client.get(\"/\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Gandalf\" in data", "def test_01_index(self):\r\n res = self.app.get(\"/\", follow_redirects=True)\r\n assert self.html_title() in res.data, res\r\n assert \"Create an App\" in res.data, res" ]
[ "0.85700876", "0.83283335", "0.8285781", "0.82591826", "0.82452273", "0.82392025", "0.82392025", "0.8199806", "0.8114395", "0.8091571", "0.8031986", "0.7946267", "0.79409236", "0.79266375", "0.7866116", "0.7856337", "0.7807612", "0.7719456", "0.7685034", "0.7626273", "0.7593646", "0.7538487", "0.75284004", "0.7514524", "0.7510136", "0.75043875", "0.74783105", "0.7472915", "0.7420776", "0.7376298" ]
0.86721593
0
Tests the contact page.
def test_contact(self): response = self.client.get('/contact') self.assertContains(response, 'Contact', 4, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_contact_page(self, client):\n response = client.get(url_for('contact.index'))\n assert response.status_code == 200", "def test_contact(self):\n response = self.client.get('/contact')\n self.assertContains(response, 'Contact', 3, 200)", "def test_contact_page(self):\n res = self.client.get('/contact')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Contact Us' in data", "def test_contact(self):\n response = self.client.get('/contact/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'home/contact.html')", "def test_6_contact(self):\n response = self.client.get(reverse('contact'), follow=True)\n self.assertEqual(response.status_code, 200)\n\n data = {\n 'contact_email': '[email protected]',\n 'content': 'test content',\n 'contact_name': 'john doe'\n }\n response = self.client.post(reverse('contact'), data, follow=True)\n self.assertEqual(response.status_code, 200)", "def test_get_contact(self):\n pass", "def test_get_contacts(self):\n pass", "def test_findContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n response = self.client.get(self.url + str(contact['id']) + '/')\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n self.assertEqual(contact2['name'], 'contact1')", "def test_get_detail(self):\n response = self.client.get(reverse('objections-contact',\n kwargs={'pk': self.contact}),\n format='json')\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_display_all_contact(self):\n self.assertEqual(Contact.display_contacts(), Contact.contact_list)", "def test_contact_basic(self):\n contact = Contact.objects.first()\n self.assertEqual(contact.name, 'test')\n self.assertEqual(contact.lastname, 'user')\n self.assertEqual(contact.dateofbirth.strftime('%Y-%m-%d'), '1983-01-01')\n self.assertEqual(contact.bio, 'Hello This is my bio')\n self.assertEqual(contact.email, '[email protected]')\n self.assertEqual(contact.jabber, '[email protected]')\n self.assertEqual(contact.skype, '[email protected]')\n self.assertEqual(contact.othercontacts, 'Other Contacts')", "def test_the_view_render_Contact_instance(self):\n\n my_info = self.response.context_data['info']\n self.assertIsInstance(my_info, Contact)\n\n model_instance = Contact.objects.first()\n self.assertIn(model_instance.name, self.response.content)\n self.assertIn(model_instance.surname, self.response.content)\n self.assertIn(model_instance.email, self.response.content)\n self.assertIn(model_instance.bio, self.response.content)\n self.assertIn(model_instance.skype, self.response.content)\n self.assertIn(model_instance.contacts, self.response.content)", "def test_important_page(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n self.assertIn(\"Email\", result.data)", "def test_data_index(self):\n response = self.client.get(reverse('index'))\n contacts = About_me.objects.all()\n self.assertEqual(response.status_code, 200)\n contact = contacts[0]\n self.assertContains(response, contact.name, 1)\n self.assertContains(response, contact.surname, 1)\n self.assertContains(\n response,\n contact.birth_date.strftime('%B %d, %Y').replace('0', ''), 1\n )\n self.assertContains(response, contact.bio, 1)\n self.assertContains(response, contact.email, 1)\n self.assertContains(response, contact.jabber, 1)\n self.assertContains(response, contact.skype, 1)\n self.assertContains(response, contact.contacts, 1)", "def test_get_contact_objects(self):\n\n 
contacts = MessageController.get_contact_objects(['2'])\n self.assertEqual(contacts[0].contact_first_name, 'Contact2')\n self.assertEqual(contacts[0].contact_phone, '4153417706')\n self.assertEqual(contacts[0].user_id, 1)\n self.assertEqual(contacts[0].lang_id, 1)", "def test_create_contact(self):\n \n url = reverse('contact-list')\n contact = self.get_dummy_contact()\n\n response = self.client.post(url, contact,\n format='json',\n HTTP_AUTHORIZATION=self.get_auth())\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), 1)\n self.assertEqual(Contact.objects.get().email_address, contact['email_address'])", "def test_contact_form(self, client):\n form = {\n 'email': '[email protected]',\n 'message': 'Test message from Snake Eyes.'\n }\n # !!!! turn-off CSRF?? !!!!\n from flask import current_app\n current_app.config['WTF_CSRF_ENABLED'] = False\n response = client.post(url_for('contact.index'), data=form,\n follow_redirects=True)\n assert_status_with_message(200, response, 'Thanks')\n # assert_status_with_message(221, response, 'Thanks')", "def test_display_all_contacts(self):\n self.assertEqual(Contact.display_all_contacts(), Contact.contact_list)", "def test_get_filter_effective_contacts(self):\n data = {\"type_contact\": 1}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def test_get_specific_contact_list(self):\n contact_list = ContactList.objects.first()\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_home_authenticated_has_contacts(testapp, fill_the_db, login_fixture):\n response = testapp.get('/', params=login_fixture).html\n assert len(response.find_all(\"img\")) == 1", "def test_Mail_campaign_page(self):\n self.client.login(username='arch', password='admin')\n response = self.client.get(reverse('echo:change_mailcampaign'))\n self.assertEqual(response.status_code, 200)", "def test_find_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n found_contact = Contact.find_by_phone(254711223344)\n\n self.assertEqual(found_contact.email, test_contact.email)", "def processContactRequest(self):\n\n\t\t# Use the simple page renderer to create the body content\n\t\treturn self.render_simple_page('Contact')", "def contact():\n return render_template('contact.html')", "def test_contact_landing_success(self):\n\n list_fingerprint = \"Make your FOIA request directly\"\n list_fingerprint += \" to the most relevant group or component\"\n\n response = self.client.get(reverse(\n 'contact_landing', kwargs={'slug': self.agency.slug}))\n self.assertContains(response, self.agency.name)\n self.assertContains(response, self.office.name)\n self.assertContains(response, self.office2.name)\n self.assertContains(response, list_fingerprint)\n\n response = self.client.get(reverse(\n 'contact_landing', kwargs={'slug': self.office.slug}))\n self.assertContains(response, self.agency.name)\n self.assertContains(response, 
self.office.name)\n self.assertNotContains(response, self.office2.name)\n self.assertNotContains(response, list_fingerprint)\n\n response = self.client.get(reverse(\n 'contact_landing', kwargs={'slug': self.agency2.slug}))\n self.assertContains(response, self.agency2.name)\n self.assertNotContains(response, self.office.name)\n self.assertNotContains(response, self.office2.name)\n self.assertNotContains(response, list_fingerprint)", "def test_address_page(self):\n tester = app.test_client(self)\n response = tester.get('/', content_type = \"html_text\")\n self.assertTrue(b'Address Locator' in response.data)", "def check_webpage_for_contact_details(self, item, response, suffix):\n check_contact = response.xpath('//*/a[contains(@href,{})]/@href'.format(\"\\\"\" + suffix + \"\\\"\")).extract_first()\n if check_contact:\n full_url = urllib.parse.urljoin(response.request.url, check_contact)\n resp = requests.get(full_url, timeout=15)\n if resp.status_code != 404:\n print(\"Found webpage {}\", full_url)\n item['contact_page'] = full_url\n raw_response = HtmlResponse(url=full_url, body=resp.content, encoding='utf-8')\n item['email'] = self.extract_email(raw_response)\n item['phone'] = self.extract_phone(raw_response)\n return item\n else:\n return item\n else:\n return item", "def test_send_mail(self):\n response = self.client.post(reverse('contact-form'), self.valid_data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, self.valid_data['subject'])\n self.assertEqual(mail.outbox[0].from_email, self.valid_data['sender_email'])\n self.assertEqual(mail.outbox[0].to[1], self.valid_data['sender_email'])", "def test_aboutpage_view(self):\n response = self.client.get(url_for('about'))\n self.assertEqual(response.status_code, 200)" ]
[ "0.865358", "0.8362252", "0.83329916", "0.8207289", "0.7702842", "0.76554585", "0.7019257", "0.699411", "0.68651897", "0.6854455", "0.6845448", "0.68376964", "0.6822424", "0.6779587", "0.6712402", "0.6696219", "0.66620266", "0.66492194", "0.6567752", "0.6566299", "0.65647703", "0.65487057", "0.65228456", "0.6474605", "0.64609027", "0.6460644", "0.64591265", "0.64432627", "0.6373795", "0.6351591" ]
0.84670943
1
Tests the register page.
def test_register(self): response = self.client.get('/register') self.assertContains(response, 'Register', 3, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_page(self):\n\n result = self.client.get('/register')\n self.assertIn('<h1>Register</h1>', result.data)\n\n print \"DONE WITH REGISTER CHECK\"", "def test_registerpage_view(self):\n response = self.client.get(url_for('register'))\n self.assertEqual(response.status_code, 200)", "def test_register_page(self):\n\n result = self.client.get(\"/register\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Register New User</span><br>\", result.data)\n self.assertIn(b\"Confirm Password\", result.data)", "def test_register(self):\n\n # make request to server\n result = self.client.get(\"/register\")\n\n # check that / route renders login page\n self.assertIn(b'<h1>Register for Roomies!</h1>',result.data)", "def test_register_view(self):\n response = self.client.get(url_for('register'))\n self.assertEqual(response.status_code, 200)", "def test_register_page(self):\n with app.test_client() as client:\n response = client.get('/register')\n html = response.get_data(as_text=True)\n self.assertIn('<button type=\"submit\">Register</button>', html)", "def test_register(self):\n app = self.create_app()\n c = app.test_client()\n\n # test response of register page\n c.get('/auth/register')\n self.assert_template_used(\"auth/register.html\")\n\n # test registering user\n rv = register(c, app.config['USERNAME'], app.config['PASSWORD'])\n self.assert_status(rv, 200)\n\n # test registering user with the same name\n register(c, app.config['USERNAME'], app.config['PASSWORD'])\n self.assert_message_flashed(f\"User {app.config['USERNAME']} is already registered.\")", "def test_navigate_to_register_page(self):\n header_text = \"Rejestracja\"\n\n hp = HomePage(self.driver)\n # hp.close_covid_popup() # popup showed at the beginning of the project\n #hp.close_danger_alert_btn()\n hp.click_sign_in_btn()\n hp.click_manager_paczek_btn()\n hp.switch_driver_to_active_tab()\n\n lp = LoginPage(self.driver)\n lp.click_register_btn()\n\n rp = RegisterPage(self.driver)\n rp.verify_register_page_loaded_successfully(header_text)", "def test_show_register_page(self):\n with self.client as c:\n\n res = c.get(\"/register\")\n html = res.get_data(as_text=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Already have an account?\", html)\n self.assertNotIn('<nav class=\"navbar navbar-custom border-bottom border-light navbar-expand-md navbar-dark sticky-top\">', html)", "def test_01_account_register(self):\n self.register()\n self.assertEquals(\n self.selenium.current_url, self.get_absolute_url())\n print 'test_register_valid_password completed'", "def test_register_page_is_rendered(self):\n url = \"/regiter/\"\n response = self.client.get('/register/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"register_user.html\")", "def test_registration_page(self):\n res = self.app.post('/register')\n self.assertEqual(res.status_code, 400)\n res = self.app.get('/register')\n self.assertEqual(res.status_code, 200)", "def test_register(self):\n selenium = self.selenium\n # Opening the link we want to test\n selenium.get(self.live_server_url + '/account/signup/')\n # find the form element\n username = selenium.find_element_by_id('id_username')\n password1 = selenium.find_element_by_id('id_password1')\n password2 = selenium.find_element_by_id('id_password2')\n submit = selenium.find_element_by_xpath('//button[text()=\"Sign up\"]')\n\n # Fill the form with data\n username.send_keys('some username')\n password1.send_keys('123456')\n 
password2.send_keys('123456')\n\n # submitting the form\n submit.click()\n\n # check the returned result\n assert 'Username' in selenium.page_source", "def test_register(self):\n rc = self.register(\n app.config['TEST_USER'],\n app.config['TEST_PW'])\n assert b'Login to Code TA' in rc.data\n\n rc = self.register(\n app.config['TEST_USER'],\n app.config['TEST_PW'])\n assert b'Sorry, that username is already taken.' in rc.data\n\n rc = self.register('', 'derp')\n assert b'Field must be between 1 and 100 characters long.' in rc.data\n\n rc = self.register('derp', '')\n assert b'This field is required.' in rc.data\n\n rc = self.register('derp', 'pass', 'not same pass')\n assert b'Passwords must match.' in rc.data\n\n rc = self.register('derp', 'pass', 'pass', email='broken', email2='broken')\n assert b'You must enter a valid email address.' in rc.data\n\n rc = self.register('derp', 'pass', 'pass', email='[email protected]')\n assert b'Email addresses must match.' in rc.data", "def test_register(self):\n # Register good data\n data = mock_data['register']\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data['message'], 'User registered')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('user' in data)", "def test_register(self):\n u = User(first_name = \"David\",\n last_name = 'Smith',\n password='******',\n email='[email protected]',\n phone_number='012-345-6789')\n response = self.register({\n 'first_name': u.first_name,\n 'last_name': u.last_name,\n 'password': u.password,\n 'email': u.email,\n 'phone_number': u.phone_number\n })\n self.assertEqual(response.status_code, 302)\n response = self.client.get(response.url)\n self.assertDictEqual(response.json(), self.client.get(reverse('backend:user_details', args=(response.json()['pk'],))).json())", "def registration(self):\n response = self.app.get(\"/registration\")\n self.assertTrue(response.status_code, 200)\"\"\"\"\"\"", "def test_03_register(self):\r\n with self.flask_app.app_context():\r\n res = self.app.get('/account/signin')\r\n assert 'Forgot Password' in res.data\r\n\r\n res = self.register(method=\"GET\")\r\n # The output should have a mime-type: text/html\r\n assert res.mimetype == 'text/html', res\r\n assert self.html_title(\"Register\") in res.data, res\r\n\r\n res = self.register()\r\n assert self.html_title() in res.data, res\r\n assert \"Thanks for signing-up\" in res.data, res.data\r\n\r\n res = self.register()\r\n assert self.html_title(\"Register\") in res.data, res\r\n assert \"The user name is already taken\" in res.data, res.data\r\n\r\n res = self.register(fullname='')\r\n assert self.html_title(\"Register\") in res.data, res\r\n msg = \"Full name must be between 3 and 35 characters long\"\r\n assert msg in res.data, res.data\r\n\r\n res = self.register(name='')\r\n assert self.html_title(\"Register\") in res.data, res\r\n msg = \"User name must be between 3 and 35 characters long\"\r\n assert msg in res.data, res.data\r\n\r\n res = self.register(name='%a/$|')\r\n assert self.html_title(\"Register\") in res.data, res\r\n msg = '$#&amp;\\/| and space symbols are forbidden'\r\n assert msg in res.data, res.data\r\n\r\n res = self.register(email='')\r\n assert self.html_title(\"Register\") in res.data, res.data\r\n assert self.html_title(\"Register\") in res.data, res.data\r\n msg = \"Email must be between 3 and 35 characters long\"\r\n assert msg in res.data, res.data\r\n\r\n res 
= self.register(email='invalidemailaddress')\r\n assert self.html_title(\"Register\") in res.data, res.data\r\n assert \"Invalid email address\" in res.data, res.data\r\n\r\n res = self.register()\r\n assert self.html_title(\"Register\") in res.data, res.data\r\n assert \"Email is already taken\" in res.data, res.data\r\n\r\n res = self.register(password='')\r\n assert self.html_title(\"Register\") in res.data, res.data\r\n assert \"Password cannot be empty\" in res.data, res.data\r\n\r\n res = self.register(password2='different')\r\n assert self.html_title(\"Register\") in res.data, res.data\r\n assert \"Passwords must match\" in res.data, res.data", "def test_dietitian_registration_form(self):\n \n result = self.client.get(\"/register\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Dietitian Registration\", result.data)", "def test_user_registration(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(1,result,\"User registration successful\")", "def test_register(self):\n\t\turl = '/register/'\n\t\tdata = {'username' : 'testUser1234', 'password' : 'pass12345'} # The amazing password...\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(User.objects.count(), 2)\n\t\t# Every user must be created with its Profile\n\t\tself.assertEqual(Profile.objects.count(), 1)\n\t\t# The initial user doesn't have a Profile. So it has to be 1.\n\t\tself.assertEqual(User.objects.get(username='testUser1234').username, 'testUser1234')", "def test_signup_page(self):\n res = self.client.get('/signup')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Sign Up' in data", "def test_register_view(self):\n url = reverse('xds_api:register')\n\n response = self.client.post(url, self.userDict)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(responseDict['token'] is not None)\n self.assertTrue(responseDict['user'] is not None)", "def test_valid_registration(self):\n r = dict(\n email='[email protected]',\n username='crow',\n password='I_do_not_caw',\n confirm_password='I_do_not_caw',\n first_name='magpie',\n last_name='corvid'\n )\n resp = self.client.post('/user/register', data=r, follow_redirects=True)\n self.assertEquals(resp.status_code, 200)", "def register(self):\r\n self.q(css='a.register').first.click()\r\n\r\n registration_page = RegisterPage(self.browser, self.course_id)\r\n registration_page.wait_for_page()\r\n return registration_page", "def test_showing_patient_registration(self):\n\n result = self.client.get(\"/patient/new-patient\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Register a New Patient\", result.data)", "def test_register_login(self):\n bn.delete_database()\n self.register()\n self.login()\n self.open(base_url)\n self.assert_element_present(\"#welcome-header\")\n self.assert_text(\"Hi \" + test_user.name, \"#welcome-header\")\n # cleanup after test by removing registered user", "def test_user_add_button_redirects_to_register_page(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text('Users').click()\n self.browser.find_element_by_id('new-user-redirect-button').click()\n contents = self.browser.find_element_by_class_name('sub-title')\n self.assertTrue('Register' in contents.text, \"Redirected page's subtitle did not contain 'Register'\")", 
"def test_signup(self):\n res = self.client.get(\"/registration\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Create Account\" in data", "def test_register(self):\n users = User.objects.filter(username='test')\n self.assertTrue(len(users) == 0)\n\n username = \"test3\"\n data = {'username': username, 'password': \"123test\", 'email': '[email protected]',\n 'newsletter': 'false', 'research': 'true', 'device': self.device}\n\n response = self.requestRegistration(data)\n\n self.assertTrue('client_id' in response.data)\n self.assertTrue(not 'password' in response.data)\n\n users = User.objects.filter(username=username)\n self.assertTrue(len(users) == 1)\n user = users[0]\n profile = user.user_profile\n self.assertTrue(profile.research)\n self.assertFalse(profile.newsletter)\n\n phone = Device.objects.get(user=user)\n\n self.assertTrue(phone.uuid == self.uuid)\n self.assertTrue(phone.cordova == self.device['cordova'])" ]
[ "0.8870613", "0.83922213", "0.83834046", "0.80776733", "0.80514234", "0.8031665", "0.8017677", "0.79519755", "0.79405785", "0.78576744", "0.78112644", "0.77617806", "0.7749312", "0.7685649", "0.74160886", "0.74112695", "0.7395869", "0.73778504", "0.7310296", "0.7306154", "0.7285865", "0.72679454", "0.72349215", "0.7221239", "0.72202134", "0.71950424", "0.718842", "0.7180081", "0.71565783", "0.7155371" ]
0.85511273
1
Ordered object for configuration. Loops through files in the `configs` directory by weight, and breaks after it finishes the file which contains the `environment` key.
def __init__(self, environment='develop'): cwd = path.dirname(path.abspath(__file__)) config_dir = path.join(cwd, 'configs') config_files = [] for (root, _, file_names) in walk(config_dir): for file_name in file_names: config_files.append(path.join(root, file_name)) config_files = sorted(config_files) for config_file in config_files: config = anyconfig.load(config_file) for key in config: self[key] = config[key] if environment in config_file: break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_builds(self):\n conf = build_config.BuildConfig(\n cmake_defs={\"BOARD\": self.config.zephyr_board}\n )\n\n kconfig_files = []\n prj_conf = self.config.project_dir / \"prj.conf\"\n if prj_conf.is_file():\n kconfig_files.append(prj_conf)\n kconfig_files.extend(self.config.kconfig_files)\n conf |= build_config.BuildConfig(kconfig_files=kconfig_files)\n\n for build_name, packer_config in self.packer.configs():\n yield build_name, conf | packer_config", "def init_configs(self):\n\n # get current location\n self.script_dir = os.path.dirname(__file__)\n\n # load configuration file\n with open(os.path.join(self.script_dir, \"config.json\")) as f:\n self.configs = json.load(f)\n \n # load some configs as attributes\n self.resource_folder = os.path.join(self.script_dir, self.configs[\"resource_path\"], self.resource_type, self.language)\n self.pre_processed_folder = os.path.join(self.resource_folder, self.configs[\"pre_processed_path\"])\n self.results_folder = os.path.join(self.resource_folder, self.configs[\"results_path\"])\n self.chunk_size = self.configs[\"resources\"][self.resource_type][\"chunk_size\"]", "def _process_environments(self):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n raw = '/'.join((self.rawdir, 'environment'))\n logger.info(\"building labels for environment\")\n env_parts = {}\n label_map = {}\n env = Environment(g)\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (environment_id, uniquename, description) = line\n # 22 heat sensitive | tetracycline conditional\n\n environment_num = environment_id\n environment_internal_id = self._makeInternalIdentifier(\n 'environment', environment_num)\n if environment_num not in self.idhash['environment']:\n self.idhash['environment'][environment_num] = \\\n environment_internal_id\n\n environment_id = self.idhash['environment'][environment_num]\n environment_label = uniquename\n if environment_label == 'unspecified':\n environment_label += ' environment'\n env.addEnvironment(environment_id, environment_label)\n self.label_hash[environment_id] = environment_label\n\n # split up the environment into parts\n # if there's parts, then add them to the hash;\n # we'll match the components in a second pass\n components = re.split(r'\\|', uniquename)\n if len(components) > 1:\n env_parts[environment_id] = components\n else:\n label_map[environment_label] = environment_id\n\n # ### end loop through file\n\n # build the environmental components\n for eid in env_parts:\n eid = eid.strip()\n for e in env_parts[eid]:\n # search for the environmental component by label\n env_id = label_map.get(e.strip())\n env.addComponentToEnvironment(eid, env_id)\n\n return", "def process_config(json_file):\n config, _ = get_config_from_json(json_file)\n print(\" THE Configuration of your experiment ..\")\n pprint(config)\n print(\" *************************************** \")\n try:\n config.summary_dir = os.path.join(\"experiments\", config.exp_name, \"summaries/\")\n config.checkpoint_dir = os.path.join(\"experiments\", config.exp_name, \"checkpoints/\")\n config.out_dir = os.path.join(\"experiments\", config.exp_name, \"out/\")\n create_dirs([config.summary_dir, config.checkpoint_dir, config.out_dir])\n except AttributeError as e:\n print(\"ERROR!!..Please provide the exp_name in json file..\")\n exit(-1)\n return config", "def load_configs(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\t# Get root 
default config.\n\t\t# TODO: change default_cnf so it places whatever the values are at this stage of the build.\n\t\tconfigs = [('defaults', StringIO(default_cnf)), os.path.expanduser('~/.shutit/config'), os.path.join(self.host['shutit_path'], 'config'), 'configs/build.cnf']\n\t\t# Add the shutit global host- and user-specific config file.\n\t\t# Add the local build.cnf\n\t\t# Get passed-in config(s)\n\t\tfor config_file_name in self.build['extra_configs']:\n\t\t\trun_config_file = os.path.expanduser(config_file_name)\n\t\t\tif not os.path.isfile(run_config_file):\n\t\t\t\tshutit_global.shutit_global_object.shutit_print('Did not recognise ' + run_config_file + ' as a file - do you need to touch ' + run_config_file + '?')\n\t\t\t\tshutit_global.shutit_global_object.handle_exit(exit_code=0)\n\t\t\tconfigs.append(run_config_file)\n\t\t# Image to use to start off. The script should be idempotent, so running it\n\t\t# on an already built image should be ok, and is advised to reduce diff space required.\n\t\tif self.action['list_configs'] or self.loglevel <= logging.DEBUG:\n\t\t\tmsg = ''\n\t\t\tfor c in configs:\n\t\t\t\tif isinstance(c, tuple):\n\t\t\t\t\tc = c[0]\n\t\t\t\tmsg = msg + ' \\n' + c\n\t\t\t\tself.log(' ' + c,level=logging.DEBUG)\n\n\t\t# Interpret any config overrides, write to a file and add them to the\n\t\t# list of configs to be interpreted\n\t\tif self.build['config_overrides']:\n\t\t\t# We don't need layers, this is a temporary configparser\n\t\t\toverride_cp = ConfigParser.RawConfigParser()\n\t\t\tfor o_sec, o_key, o_val in self.build['config_overrides']:\n\t\t\t\tif not override_cp.has_section(o_sec):\n\t\t\t\t\toverride_cp.add_section(o_sec)\n\t\t\t\toverride_cp.set(o_sec, o_key, o_val)\n\t\t\toverride_fd = StringIO()\n\t\t\toverride_cp.write(override_fd)\n\t\t\toverride_fd.seek(0)\n\t\t\tconfigs.append(('overrides', override_fd))\n\n\t\tself.config_parser = self.get_configs(configs)\n\t\tself.get_base_config()", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n # read the file in, sample-by-sample\n # build the dictionary recursively\n # add rle file also to generated cfg files\n # print integrations per job as well!\n # consider more than 1 file per jobs -- the jobs are splitted by MEM integration anyways\n\n rle_filters = self.get_filter() if self.rle_filter_file else {}\n statistics = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n if not os.path.exists(sample_info['local_paths'][0]['path']):\n logging.warning(\"Skipping sample {sample_name}\".format(sample_name = sample_name))\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_addMEM, process_name))\n is_mc = (sample_info[\"type\"] == \"mc\")\n if self.rle_filter_file:\n assert(process_name in rle_filters)\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n # typically, the analysis ends here and starts looping b/c the smallest unit of work processes\n # at least one file; we need, however, to split the file into event ranges in such a way that\n # each job performs mem_integrations_per_job MEM integrations\n\n # so what we are going to do is to open each set of files in inputFileList, read the variable\n # requestMEM_*l_*tau and try to gather 
the event ranges such that each event range\n # performs up to mem_integrations_per_job integrations per job\n memEvtRangeDict = self.memJobList(inputFileList, rle_filters[process_name] if self.rle_filter_file else [])\n\n for jobId in memEvtRangeDict.keys():\n\n key_dir = getKey(sample_name)\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = memEvtRangeDict[jobId]['input_fileset']\n\n # there should always be a job\n assert(self.inputFiles[key_file] > 0), \"More than one input file: %s ?? !!\" % \\\n ', '.join(self.inputFiles[key_file])\n\n #assert(len(self.inputFiles[key_file]) == 1), \"There is more than one input file!\"\n self.cfgFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i_cfg.py\" % (self.channel, process_name, jobId)\n )\n self.shFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i.sh\" % (self.channel, process_name, jobId)\n )\n self.outputFiles[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_NTUPLES], \"%s_%i.root\" % (process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"addMEM_%s_%s_%i.log\" % (self.channel, process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = get_log_version((self.logFiles_addMEM[key_file],))[0]\n self.createCfg_addMEM(\n self.inputFiles[key_file],\n memEvtRangeDict[jobId]['event_range'][0],\n memEvtRangeDict[jobId]['event_range'][1],\n self.outputFiles[key_file],\n self.era,\n sample_info[\"sample_category\"],\n is_mc,\n self.cfgFiles_addMEM_modified[key_file],\n memEvtRangeDict[jobId]['whitelist'],\n )\n\n # associate the output file with the fileset_id\n #UDPATE: ONE OUTPUT FILE PER SAMPLE!\n fileset_id = memEvtRangeDict[jobId]['fileset_id']\n hadd_output_dir = os.path.join(\n self.dirs[key_dir][DKEY_FINAL_NTUPLES],\n '%04d' % (fileset_id // 1000)\n )\n if not os.path.exists(hadd_output_dir):\n os.makedirs(hadd_output_dir)\n hadd_output = os.path.join(\n hadd_output_dir, '%s_%i.root' % ('tree', fileset_id) # UDPATE: ADDED\n #hadd_output_dir, \"tree.root\" # UDPATE: REMOVED\n )\n if hadd_output not in self.hadd_records:\n self.hadd_records[hadd_output] = {}\n self.hadd_records[hadd_output]['output_files'] = []\n self.hadd_records[hadd_output]['fileset_id'] = fileset_id\n self.hadd_records[hadd_output]['output_files'].append(self.outputFiles[key_file])\n self.hadd_records[hadd_output]['process_name'] = process_name\n\n # let's sum the number of integration per sample\n nofEntriesMap = {}\n for v in memEvtRangeDict.values():\n if v['fileset_id'] not in nofEntriesMap:\n nofEntriesMap[v['fileset_id']] = {\n 'nof_entries' : v['nof_entries'],\n }\n statistics[process_name] = {\n 'nof_int' : sum([entry['nof_int'] for entry in memEvtRangeDict.values()]),\n 'nof_entries' : sum([entry['nof_entries'] for entry in nofEntriesMap.values()]),\n 'nof_events_pass' : sum([entry['nof_events_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_int_pass' : sum([entry['nof_int_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_zero' : sum([entry['nof_zero'] for entry in memEvtRangeDict.values()]),\n 'nof_jobs' : len(memEvtRangeDict),\n }\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_addMEM)\n self.createScript_sbatch()\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_addMEM(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n self.createMakefile(lines_makefile)\n\n ws_len = 
max([len(kk) + 1 for kk in statistics.keys()])\n total_nof_integrations_sum = sum(x['nof_int'] for x in statistics.values())\n total_nof_entires = sum(x['nof_entries'] for x in statistics.values())\n total_nof_zero_int = sum(x['nof_zero'] for x in statistics.values())\n total_nof_jobs = sum(x['nof_jobs'] for x in statistics.values())\n total_nof_pass = sum(x['nof_events_pass'] for x in statistics.values())\n total_nof_int_pass_avg = float(sum(x['nof_int_pass'] for x in statistics.values())) / total_nof_pass\n total_nof_integrations_avg = float(total_nof_integrations_sum) / total_nof_entires\n total_nof_int_per_job = float(total_nof_integrations_sum) / total_nof_jobs\n for k, v in statistics.iteritems():\n if v['nof_entries'] == 0:\n int_per_event = 0.\n evt_pass = 0.\n else:\n int_per_event = float(v['nof_int']) / v['nof_entries']\n evt_pass = (100 * float(v['nof_events_pass']) / v['nof_entries'])\n if v['nof_events_pass'] == 0:\n nof_int_pass = 0.\n else:\n nof_int_pass = float(v['nof_int_pass']) / v['nof_events_pass']\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d (%.2f%%) evt pass; %.2f int/evt pass; %d evt 0int)' %\n (k,\n ' ' * (ws_len - len(k)),\n v['nof_int'],\n v['nof_entries'],\n v['nof_jobs'],\n int_per_event,\n v['nof_events_pass'],\n evt_pass,\n nof_int_pass,\n v['nof_zero'],\n )\n )\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d evt pass; %.2f int/evt pass; '\n '%.2f int/job pass; %d evt 0int)' %\n ('total',\n ' ' * (ws_len - len('total')),\n total_nof_integrations_sum,\n total_nof_entires,\n total_nof_jobs,\n total_nof_integrations_avg,\n total_nof_pass,\n total_nof_int_pass_avg,\n total_nof_int_per_job,\n total_nof_zero_int,\n )\n )\n\n if self.max_mem_integrations > 0 and total_nof_integrations_sum > self.max_mem_integrations:\n logging.error(\"Will not start the jobs (max nof integrations exceeded)!\")\n return False\n else:\n logging.info(\"Done\")\n return True", "def build_configs():", "def get_dict(self):\n\tself.log.debug('Getting dictionary from config files: %s', str(self.file_list))\n\tfor cfg_file in self.file_list:\n\t \"\"\"\n\t We want to append dictionaries from all the config files.\n\t \"\"\"\n\t if self.cfg_type == None: self.cfg_type = self._get_cfg_type(cfg_file)\n\t self.log.debug('Updating dictionary from config file in the order provided: %s',str(cfg_file) )\n\t if self.cfg_type.lower() in ['yaml', \"yml\"]: self._get_dict_yaml(cfg_file)\n\t elif self.cfg_type.lower() == 'xml': self._get_dict_xml(cfg_file)\n\t elif self.cfg_type.lower() == 'json': self._get_dict_json(cfg_file)\n\t elif self.cfg_type.lower() == 'ini': self._get_dict_ini(cfg_file)\n\t \n\treturn self.cfg_dict", "def setup(self):\n # Initialize key variables\n valid = True\n updated_list = []\n config = copy.deepcopy(self.config)\n directory = self.directories[0]\n\n # Update log_directory and ingest_cache_directory\n if isinstance(config, dict) is True:\n if 'main' in config:\n # Setup the log_directory to a known good default\n (updated, config) = self._create_directory_entries(\n 'log_directory', config)\n updated_list.append(updated)\n\n # Setup the ingest_cache_directory to a known good default\n (updated, config) = self._create_directory_entries(\n 'ingest_cache_directory', config)\n updated_list.append(updated)\n\n else:\n valid = False\n else:\n valid = False\n\n # Gracefully exit if things are not OK\n if valid is False:\n log_message = (\n 'Configuration files found in {} is invalid'\n ''.format(self.directories))\n log.log2die_safe(1007, log_message)\n\n # 
Update configuration file if required\n if len(updated_list) == updated_list.count(True):\n for next_directory in self.directories:\n # Delete all YAML files in the directory\n general.delete_yaml_files(next_directory)\n\n # Write config back to directory\n filepath = ('%s/config.yaml') % (directory)\n with open(filepath, 'w') as outfile:\n yaml.dump(config, outfile, default_flow_style=False)", "def apply_config_file(self, filename):\n def extractor(template, options):\n \"\"\"Ignore things that are existing non default values\"\"\"\n for name, val in options:\n normalised = self.normalise_key(name)\n if normalised in self.values and not isinstance(self.values[normalised], Default):\n continue\n else:\n yield name, val\n\n items = json.load(open(filename)).items()\n self.use_options(items, extractor)", "def _read_latest_config_files(self, run_path_pairs):\n configs = {}\n config_fpaths = {}\n for run_name, logdir in run_path_pairs:\n config = ProjectorConfig()\n config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)\n if file_io.file_exists(config_fpath):\n file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')\n text_format.Merge(file_content, config)\n\n has_tensor_files = False\n for embedding in config.embeddings:\n if embedding.tensor_path:\n has_tensor_files = True\n break\n\n if not config.model_checkpoint_path:\n # See if you can find a checkpoint file in the logdir.\n ckpt_path = latest_checkpoint(logdir)\n if not ckpt_path:\n # Or in the parent of logdir.\n ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))\n if not ckpt_path and not has_tensor_files:\n continue\n if ckpt_path:\n config.model_checkpoint_path = ckpt_path\n\n # Sanity check for the checkpoint file.\n if (config.model_checkpoint_path and\n not checkpoint_exists(config.model_checkpoint_path)):\n logging.warning('Checkpoint file %s not found',\n config.model_checkpoint_path)\n continue\n configs[run_name] = config\n config_fpaths[run_name] = config_fpath\n return configs, config_fpaths", "def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. 
OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")", "def process_config(self, filename):\n \n self.log_message(\"processing config file: \"+filename)\n parser = SafeConfigParser()\n parser.optionxform = str\n parser.read(filename)\n self.source_files[filename] = parser\n \n sections = parser.sections()\n for section in sections:\n \n options = parser.options(section)\n params = {}\n non_std = {}\n for option in options:\n ## any option that ends with the word \"password\" will be encrypted and will automatically be decrypted upon\n ## processing \n if option in self.standard_options:\n params[option] = self.get_value(option, parser.get(section, option))\n else:\n non_std[option] = self.get_value(option, parser.get(section, option))\n\n params['non_std'] = non_std\n params['source_file'] = filename\n params['name']=section\n params['run_date']=self.run_date\n c_entry = ConfigEntry(params)\n if c_entry.ready: \n entry_num = c_entry.get_entry_type()\n self.entries[self.entry_types[entry_num]].append(c_entry)\n self.entry_dict[section] = {'source':filename,'entry':c_entry}\n self.log_message(\"Loaded Config Entry: \"+section)\n else:\n self.log_message(\"Failed to load config entry: \"+section)\n\n return self.entries", "def config_collection(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tself.log('In config_collection',level=logging.DEBUG)\n\t\tcfg = self.cfg\n\t\tfor module_id in self.module_ids():\n\t\t\t# Default to None so we can interpret as ifneeded\n\t\t\tself.get_config(module_id, 'shutit.core.module.build', None, boolean=True, forcenone=True)\n\t\t\tself.get_config(module_id, 'shutit.core.module.remove', False, boolean=True)\n\t\t\tself.get_config(module_id, 'shutit.core.module.tag', False, boolean=True)\n\t\t\t# Default to allow any image\n\t\t\tself.get_config(module_id, 'shutit.core.module.allowed_images', [\".*\"])\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tcfg_file = os.path.dirname(get_module_file(self,module)) + '/configs/build.cnf'\n\t\t\tif os.path.isfile(cfg_file):\n\t\t\t\t# use self.get_config, forcing the passed-in default\n\t\t\t\tconfig_parser = ConfigParser.ConfigParser()\n\t\t\t\tconfig_parser.read(cfg_file)\n\t\t\t\tfor section in config_parser.sections():\n\t\t\t\t\tif section == module_id:\n\t\t\t\t\t\tfor option in config_parser.options(section):\n\t\t\t\t\t\t\tif option == 'shutit.core.module.allowed_images':\n\t\t\t\t\t\t\t\toverride = False\n\t\t\t\t\t\t\t\tfor mod, opt, val in self.build['config_overrides']:\n\t\t\t\t\t\t\t\t\tval = val # pylint\n\t\t\t\t\t\t\t\t\t# skip overrides\n\t\t\t\t\t\t\t\t\tif mod == module_id and opt == option:\n\t\t\t\t\t\t\t\t\t\toverride = True\n\t\t\t\t\t\t\t\tif override:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tvalue = config_parser.get(section,option)\n\t\t\t\t\t\t\t\tif option == 'shutit.core.module.allowed_images':\n\t\t\t\t\t\t\t\t\tvalue = 
json.loads(value)\n\t\t\t\t\t\t\t\tself.get_config(module_id, option, value, forceask=True)\n\t\t\t# ifneeded will (by default) only take effect if 'build' is not\n\t\t\t# specified. It can, however, be forced to a value, but this\n\t\t\t# should be unusual.\n\t\t\tif cfg[module_id]['shutit.core.module.build'] is None:\n\t\t\t\tself.get_config(module_id, 'shutit.core.module.build_ifneeded', True, boolean=True)\n\t\t\t\tcfg[module_id]['shutit.core.module.build'] = False\n\t\t\telse:\n\t\t\t\tself.get_config(module_id, 'shutit.core.module.build_ifneeded', False, boolean=True)", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in 
self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def gen_config(self):\n if self.want:\n wantd = {\n (entry[\"process_id\"], entry.get(\"vrf\")): entry\n for entry in self.want.get(\"processes\", [])\n }\n else:\n wantd = {}\n if self.have:\n haved = {\n (entry[\"process_id\"], entry.get(\"vrf\")): entry\n for entry in self.have.get(\"processes\", [])\n }\n else:\n haved = {}\n\n # turn all lists of dicts into dicts prior to merge\n for thing in wantd, haved:\n for _pid, proc in iteritems(thing):\n for area in proc.get(\"areas\", []):\n virtual_link = {\n entry[\"id\"]: entry\n for entry in area.get(\"virtual_link\", [])\n }\n if bool(virtual_link):\n area[\"virtual_link\"] = virtual_link\n ranges = {\n entry[\"address\"]: entry\n for entry in area.get(\"ranges\", [])\n }\n if bool(ranges):\n area[\"ranges\"] = ranges\n\n proc[\"areas\"] = {\n entry[\"area_id\"]: entry for entry in proc.get(\"areas\", [])\n }\n if proc.get(\"distribute_list\"):\n if \"acls\" in proc.get(\"distribute_list\"):\n proc[\"distribute_list\"][\"acls\"] = {\n entry[\"name\"]: entry\n for entry in proc[\"distribute_list\"].get(\n \"acls\", []\n )\n }\n\n # if state is merged, merge want onto have\n if self.state == \"merged\":\n wantd = dict_merge(haved, wantd)\n\n # if state is deleted, limit the have to anything in want\n # set want to nothing\n if self.state == \"deleted\":\n haved = {\n k: v for k, v in iteritems(haved) if k in wantd or not wantd\n }\n wantd = {}\n\n # delete processes first so we do run into \"more than one\" errors\n if self.state == \"deleted\":\n haved_del = deepcopy(haved)\n want_process = {}\n for k, t_want in iteritems(haved_del):\n want_process[\"process_id\"] = t_want.get(\"process_id\")\n if not (len(t_want) == 2 and not t_want.get(\"areas\")):\n self._compare(want=want_process, have=haved_del.get(k, {}))\n if self.state == \"overridden\":\n haved_del = deepcopy(haved)\n want = {}\n for k, t_want in iteritems(haved_del):\n if k not in wantd:\n want[\"process_id\"] = t_want.get(\"process_id\")\n if not (len(t_want) == 2 and not t_want.get(\"areas\")):\n self._compare(want=want, have=haved_del.get(k, {}))\n\n for k, want in iteritems(wantd):\n self._compare(want=want, have=haved.pop(k, {}))", "def branch(configs, weights):\n\n nconfig = configs.configs.shape[0]\n probability = np.cumsum(weights)\n wtot = probability[-1]\n base = np.random.rand()\n newinds = np.searchsorted(\n probability, (base + np.linspace(0, wtot, nconfig)) % wtot\n )\n configs.resample(newinds)\n weights.fill(wtot / nconfig)\n return configs, weights", "def exp_config():\n with open(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"experiment.yaml\")\n ) as f:\n exp_config = list(yaml.safe_load_all(f))\n\n for config 
in exp_config[0]:\n backward.populate_space(config)\n\n return exp_config", "def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]", "def config_step(self):\n api = self.api\n # bisect_config may come as a FrozenDict (which is not serializable).\n bisect_config = dict(self.bisect_config)\n\n def fix_windows_backslashes(s):\n backslash_regex = re.compile(r'(?<!\\\\)\\\\(?!\\\\)')\n return backslash_regex.sub(r'\\\\', s)\n\n for k, v in bisect_config.iteritems():\n if isinstance(v, basestring):\n bisect_config[k] = fix_windows_backslashes(v)\n # We sort the keys to prevent problems with orders changing when\n # recipe_simulation_test compares against expectation files.\n config_string = json.dumps(bisect_config, indent=2, sort_keys=True)\n result = api.m.step('config', [])\n config_lines = config_string.splitlines()\n result.presentation.logs['Bisect job configuration'] = config_lines", "def gather_configs(self):\n configs = []\n for what in self.order:\n for key in self.plugins[what]:\n mgr = self.plugins[what][key]\n c = mgr.config(what='get')\n if c is not None:\n c.update({\n 'description': mgr.description\n })\n # print(\"Gathering configuration from \", c)\n configs.append(c)\n return configs", "def process_config(self):\n driver_options = self.config['service']['options']\n process_config = {\n 'assembler_config': {\n 'driver_options': driver_options,\n 'teststep_config': self.teststep_config,\n 'testcase_config': self.config['reader_settings']['test_case']['keys'],\n },\n 'assembly_config': self.config['assembly_settings'],\n }\n return process_config", "def generate_config(self):\n\n # Change crypto-config.yaml and add organizations\n yaml = YAML()\n with open(os.path.join(self.config_path, \"crypto-config-template.yaml\"), \"r\") as crypto_config_file:\n config = yaml.load(crypto_config_file)\n\n config[\"OrdererOrgs\"][0][\"Specs\"] = []\n for orderer_index in range(1, self.num_validators + 1):\n orderer_host, _ = self.experiment.get_peer_ip_port_by_id(orderer_index)\n config[\"OrdererOrgs\"][0][\"Specs\"].append({\n \"Hostname\": \"orderer%d\" % orderer_index,\n \"SANS\": [orderer_host]\n })\n\n config[\"PeerOrgs\"] = []\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n organization_config = {\n \"Name\": \"Org%d\" % organization_index,\n \"Domain\": \"org%d.example.com\" % organization_index,\n \"EnableNodeOUs\": True,\n \"Template\": {\n \"Count\": 1,\n \"SANS\": [organization_host]\n },\n \"Users\": {\n \"Count\": 1\n }\n }\n config[\"PeerOrgs\"].append(organization_config)\n\n with open(os.path.join(self.config_path, \"crypto-config.yaml\"), \"w\") as crypto_config_file:\n yaml.dump(config, crypto_config_file)\n\n # Change configtx.yaml\n yaml = YAML()\n with open(os.path.join(self.config_path, \"configtx-template.yaml\"), \"r\") as configtx_file:\n config = yaml.load(configtx_file)\n\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n org_admin = \"Org%dMSP.admin\" % organization_index\n org_peer = \"Org%dMSP.peer\" % organization_index\n org_client = \"Org%dMSP.client\" % organization_index\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n\n 
organization_config = {\n \"Name\": \"Org%dMSP\" % organization_index,\n \"ID\": \"Org%dMSP\" % organization_index,\n \"MSPDir\": \"crypto-config/peerOrganizations/org%d.example.com/msp\" % organization_index,\n \"Policies\": {\n \"Readers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s', '%s')\" % (org_admin, org_peer, org_client)\n },\n \"Writers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s')\" % (org_admin, org_peer)\n },\n \"Admins\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s')\" % (org_admin)\n }\n },\n \"AnchorPeers\": [{\n \"Host\": organization_host,\n \"Port\": 7000 + organization_index\n }]\n }\n\n commented_map = CommentedMap(organization_config)\n commented_map.yaml_set_anchor(\"Org%d\" % organization_index, always_dump=True)\n config[\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"]\\\n .append(commented_map)\n\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n consenter_port = 7000 + organization_index\n consenter_info = {\n \"Host\": organization_host,\n \"Port\": consenter_port,\n \"ClientTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index,\n \"ServerTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index\n }\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"].append(consenter_info)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"].append(\n \"%s:%d\" % (organization_host, consenter_port))\n\n with open(os.path.join(self.config_path, \"configtx.yaml\"), \"w\") as configtx_file:\n round_trip_dump(config, configtx_file, Dumper=RoundTripDumper)", "def env_config():\n # setup\n env = {'ELB_GCP_PROJECT': 'expected-gcp-project',\n 'ELB_GCP_REGION': 'expected-gcp-region',\n 'ELB_GCP_ZONE': 'expected-gcp-zone',\n 'ELB_BATCH_LEN': '93',\n 'ELB_CLUSTER_NAME': 'expected-cluster-name',\n 'ELB_RESULTS': 'gs://expected-results',\n 'ELB_USE_PREEMPTIBLE': 'true',\n 'ELB_BID_PERCENTAGE': '91'}\n\n for var_name in env:\n os.environ[var_name] = str(env[var_name])\n\n yield env\n\n # cleanup\n for var_name in env:\n # os.unsetenv does not work on every system\n del os.environ[var_name]", "def preload_all_configs(self):\n for _, _, filenames in os.walk(self.configDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n configID = filename[0:-3]\n self.load_config(configID)", "def get_configs(candidate_filename):\n return (sortby('name')(haresources2.load(haresources2_file)),\n sortby('name')(crmdict2haresources(crm2dict(configure_parse()))))", "def gather_file_data(config):\n file_regex = re.compile(config['file_regex'])\n category_regex = re.compile(config['category_regex'])\n policies = {}\n\n for root, dirs, files in os.walk(config['c7n_policy_directory']):\n for file in files:\n if file_regex.match(file):\n file_path = root + '/' + file\n logging.debug('Processing file %s', file_path)\n with 
open(file_path, 'r') as stream:\n try:\n if category_regex.search(file_path):\n category = 'Security & Governance'\n else:\n category = 'Cost Controls'\n\n policies = yaml.load(stream)\n for policy in policies['policies']:\n logging.debug(\n 'Processing policy %s', policy['name'])\n policy['file_url'] = get_file_url(\n file_path, config)\n resource_type = policy['resource']\n if category not in c7n_data:\n c7n_data[category] = {}\n if resource_type not in c7n_data[category]:\n c7n_data[category][resource_type] = []\n c7n_data[category][resource_type].append(policy)\n except yaml.YAMLError as exc:\n logging.error(exc)", "def __init__(self, configfile):\n\n self.all_sqs = {}\n self.all_dynamodb = {}\n self.all_s3 = {}\n self.all_sites = []\n\n with open(configfile, 'r') as stream:\n try: \n self.config = yaml.safe_load(stream)\n self.all_sites = list(self.config['Sites'].keys())\n\n for site in self.all_sites:\n self.all_sqs[site] = self.make_sqs(site)\n self.all_dynamodb[site] = self.make_dynamodb(site)\n self.all_s3[site] = self.make_s3(site)\n \n except Exception as e:\n print(f\"Exception: {e}\")", "def get_configs(self, configs):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcp = LayerConfigParser()\n\t\tfail_str = ''\n\t\tfiles = []\n\t\tfor config_file in configs:\n\t\t\tif isinstance(config_file, tuple):\n\t\t\t\tcontinue\n\t\t\tif not shutit_util.is_file_secure(config_file):\n\t\t\t\tfail_str = fail_str + '\\nchmod 0600 ' + config_file\n\t\t\t\tfiles.append(config_file)\n\t\tif fail_str != '':\n\t\t\tif shutit_global.shutit_global_object.interactive > 1:\n\t\t\t\tfail_str = 'Files are not secure, mode should be 0600. Running the following commands to correct:\\n' + fail_str + '\\n'\n\t\t\t\t# Actually show this to the user before failing...\n\t\t\t\tself.log(fail_str,level=logging.INFO)\n\t\t\t\tself.log('Do you want me to run this for you? (input y/n)',level=logging.INFO)\n\t\t\t\tif shutit_global.shutit_global_object.interactive == 0 or shutit_util.util_raw_input(default='y') == 'y':\n\t\t\t\t\tfor f in files:\n\t\t\t\t\t\tself.log('Correcting insecure file permissions on: ' + f,level=logging.INFO)\n\t\t\t\t\t\tos.chmod(f,0o600)\n\t\t\t\t\t# recurse\n\t\t\t\t\treturn self.get_configs(configs)\n\t\t\telse:\n\t\t\t\tfor f in files:\n\t\t\t\t\tself.log('Correcting insecure file permissions on: ' + f,level=logging.INFO)\n\t\t\t\t\tos.chmod(f,0o600)\n\t\t\t\t# recurse\n\t\t\t\treturn self.get_configs(configs)\n\t\t\tself.fail(fail_str) # pragma: no cover\n\t\tfor config in configs:\n\t\t\tif isinstance(config, tuple):\n\t\t\t\tcp.readfp(config[1], filename=config[0])\n\t\t\telse:\n\t\t\t\tcp.read(config)\n\t\t# Treat allowed_images as a special, additive case\n\t\tself.build['shutit.core.module.allowed_images'] = cp.get_config_set('build', 'shutit.core.module.allowed_images')\n\t\treturn cp", "def read_config(self, config_filename):" ]
[ "0.5682938", "0.56562144", "0.56032443", "0.55194974", "0.5488775", "0.5429362", "0.53881335", "0.53552264", "0.5261554", "0.5189298", "0.5165816", "0.5155886", "0.51521325", "0.51439726", "0.51407737", "0.5138546", "0.51242834", "0.51000327", "0.50886273", "0.50497407", "0.50253147", "0.5022655", "0.49925423", "0.49751824", "0.49736312", "0.49456787", "0.49385846", "0.49353293", "0.49235234", "0.49176174" ]
0.6080864
0
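A minimal, self-contained sketch of the weight-ordered merge behaviour described above, with the loop breaking once the environment's file has been applied; the file names, keys, and the in-memory dicts standing in for parsed files are all hypothetical, and anyconfig is not needed for the illustration.

def merge_configs(files, environment):
    # Lower "weight" prefixes sort first; later files override earlier keys.
    merged = {}
    for name in sorted(files):
        merged.update(files[name])
        # Stop once the file whose path contains the environment name is merged,
        # so heavier files (e.g. production) are never applied in develop.
        if environment in name:
            break
    return merged

files = {
    "configs/00-defaults.json":   {"debug": False, "db": "sqlite"},
    "configs/10-develop.json":    {"debug": True},
    "configs/20-production.json": {"db": "postgres"},
}
print(merge_configs(files, "develop"))  # {'debug': True, 'db': 'sqlite'}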
n, the number of stairs O(2^n) time and space
def climbing_stairs(n): if n < 2: return 1 if n == 2: return 2 return climbing_stairs(n-1) + climbing_stairs(n-2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def amount_of_stairs(n):\n\n matrix = [[0] * n for i in range(n)]\n\n for i in range(0, n):\n for j in range(1, i):\n matrix[i][j] = sum(matrix[i - j - 1][:j])\n matrix[i][i] = 1\n\n # print_matrix(matrix)\n return sum(matrix[n-1])", "def climbing_stairs(n):\n\tdico = {0:1, 1:1}\n\n\tfor i in range(2, n+1):\n\t\tdico[i] = dico[i-1] + dico[i-2]\n\n\treturn dico[n]", "def count_stair_ways(n):\n if n == 1:\n return 1\n if n == 2:\n return 2\n return count_stair_ways(n - 1) + count_stair_ways(n - 2)", "def climbStairs(n: int) -> int:\n fibonacci_sequence = [1, 2]\n if n < 2:\n return fibonacci_sequence[n-1]\n\n while len(fibonacci_sequence) != n:\n fibonacci_sequence.append(fibonacci_sequence[-1] + fibonacci_sequence[-2])\n\n return fibonacci_sequence[n-1]", "def sw(n):\n return 4*n*n + 2*n + 1", "def countArrangement(self, n: int) -> int:\n @lru_cache(None)\n def dfs(i, remains: Set[int]):\n if i == n+1:\n return 1\n cnt = 0\n for j in remains:\n if i%j == 0 or j%i == 0:\n cnt += dfs(i+1, remains - {j})\n return cnt\n\n return dfs(1, frozenset(range(1, n+1)))", "def countArrangement(self, n: int) -> int:\n def iter_digit(n):\n while n:\n yield n % 2\n n //= 2\n\n @lru_cache(None)\n def dfs(i, remains):\n if i == n+1:\n return 1\n cnt = 0\n for j, d in enumerate(iter_digit(remains)):\n if d == 0:\n continue\n if j%i == 0 or i%j == 0:\n remains ^= 2**j\n cnt += dfs(i+1, remains)\n remains ^= 2**j\n return cnt\n\n # starting from 11..10 (length is n+1)\n return dfs(1, 2**(n+1)-2)", "def climb_stairs_2(n):\n fib = list()\n fib.append(1)\n fib.append(1)\n if n == 0 or n == 1:\n return fib[n]\n for i in range(2, n+1):\n temp = fib[i-1] + fib[i-2]\n fib.append(temp)\n return fib[n]", "def countArrangement(self, n: int) -> int:\n def dfs(i, remains: List[int]):\n if i == n+1:\n return 1\n cnt = 0\n for j in range(1, n+1):\n if remains[j] is None and (i%j == 0 or j%i == 0):\n remains[j] = i\n cnt += dfs(i+1, remains)\n remains[j] = None\n return cnt\n\n return dfs(1, [None]*(n+1))", "def calcSpacings(self, n):\n\n first = next = (n) / float(n + 1)\n for i in range(n):\n yield (next, 1 - next)\n next = first - (1 - next)", "def num_squares(n):\n squares = _squares(n)\n cnt = 0\n remains = {n}\n while remains:\n cnt += 1\n tmp = set()\n for remain in remains:\n for sq in [sqq for sqq in squares if sqq <= remain]:\n if remain == sq:\n return cnt\n else:\n tmp.add(remain - sq)\n remains = tmp", "def schrage_nlogn(data):\n N = data.copy()\n for i in range(len(data)):\n N[i] = (N[i][0], N[i])\n heapq.heapify(N)\n \"\"\"\"\n mozna to zaltwic przy wczytaniu danych nie wplywa na zloznosc samego algorytmu\n \n N to tablica tablica krotek takich że (r , [r, p,q]), (r1, [r1 ,p1 , q1]) ........\n heapq sortuje po pierwszym elemncie dlatego tak\n \n G analogicznie z tym że sortowane jest malejaco po q więc G = [(q, [r, p ,q ]), (q1, [r1, p1, q1]) .......... 
] \n \"\"\"\n G = []\n Pi = []\n t = N[0][0]\n start = timer()\n while len(G) != 0 or len(N) != 0:\n while len(N) != 0 and Schrage.save_min(N) <= t:\n e = heapq.heappop(N)\n heapq.heappush(G, (-e[1][2], e[1])) # O(log n)\n if len(G) != 0:\n e = heapq.heappop(G) # O(log n)\n Pi.append(e[1]) # O(1)\n t = t + e[1][1]\n else:\n t = N[0][0] # O(1)\n end = timer()\n executionTime = end - start\n return Pi, executionTime", "def numSquares(self, n):\n while len(Solution.F) <= n:\n i = len(Solution.F)\n Solution.F.append(sys.maxint)\n j = 1\n while i - j*j >= 0:\n Solution.F[i] = min(Solution.F[i], Solution.F[i-j*j]+1)\n j += 1\n\n return Solution.F[n]", "def num_squares(n):\n nums = _squares(n)\n\n dp = [0] * (n + 1)\n dp[0] = 0\n\n for i in range(1, n + 1):\n can = [j for j in nums if j <= i]\n dp[i] = 1 + min([dp[i - c] for c in can])\n\n return dp[n]", "def find(n):\n tn = int(n / 2)\n s = 2 * tn + 1\n count = 0\n line_list = []\n for i in range(1, n + 1):\n for j in range(i + 1, n + 1):\n if i + j != s:\n line_list.append(str(i) + \" \" + str(j))\n count += 1\n\n return count, line_list", "def numSquares(self, n):\n # Generate perfect square numbers smaller than n.\n perfect_square_numbers = []\n i = 1\n square_i = i * i\n while square_i <= n:\n perfect_square_numbers.append(square_i)\n i += 1\n square_i = i * i\n\n cur_level = [n]\n count = 0\n while cur_level:\n count += 1\n tmp = []\n for num in cur_level:\n for val in perfect_square_numbers:\n if num == val:\n return count\n if val < num:\n tmp.append(num - val)\n if val > num:\n break\n cur_level = tmp\n return count", "def count_tilings(n: int) -> int:\n if n < 5:\n # handle recursive base case\n return 2**(n - 1)\n else:\n # place each tile at end of row and recurse on remainder\n return (count_tilings(n - 1) +\n count_tilings(n - 2) +\n count_tilings(n - 3) +\n count_tilings(n - 4))", "def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))", "def nw(n):\n return 4*n*n + 1", "def sum_of_squares(n):\n result = i = 0\n while i < n:\n result += i\n i += 1\n return result", "def sumn(n):\n return n * (n + 1) // 2", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def num_of_ways(n):\n if n == 0 or n == 1:\n return 1\n \n n_minus_2_step = 1\n n_minus_1_step = 1\n n_step = None\n\n #num_of_ways(n) = num_of_ways(n-1) + num_of_ways(n-2)\n for i in range(n-1):\n n_step = n_minus_1_step + n_minus_2_step\n n_minus_2_step = n_minus_1_step\n n_minus_1_step = n_step\n \n return n_step", "def partitions(n):\n for a in range(2,n//2+1):\n yield a, n-a", "def I (self, n):", "def count_ways(n):\n if n < 0:\n return 0\n elif n == 0:\n return 1\n else:\n total = 0\n for i in range(1, min(n, 3) + 1):\n total += count_ways(n - i)\n return total", "def count_square_sums(n):\n if n == 0: return 1\n total = 4*( sum(1 for i in divisors(n) if i % 4 == 1) \n - sum(1 for i in divisors(n) if i % 4 == 3) )\n ## Remove duplicate countings if n > 0\n ## Eight duplicates: (+/-a, +/-b) (+/-b, +/-a) \n ## Four duplicates: (0,+1), (0,-1), (+1,0), (-1,0)\n ## Four duplicates: (+/-1,+/-1)\n flg = 0\n if is_square(n): flg += 1\n if is_square(n/2) and (n % 2 == 0): flg += 1\n return (total + 4*flg)/8", "def silnia_it(n):\n wynik = 1\n \n for i in range(1, n + 1):\n wynik = wynik * i\n return wynik", "def s2(n, k):\n if n == 0 or n != 0 and n == k:\n return 1\n if k == 0 or n < k:\n return 0\n return k * s2(n-1, k) + s2(n-1, k-1)", "def get_num_pairs(seq):\n n = len(seq)\n return int(n * (n-1)/2) # sum of arphmetic 
progression (n-1)...1" ]
[ "0.80979145", "0.7657417", "0.7441372", "0.6898625", "0.6698458", "0.66620463", "0.6619402", "0.65921396", "0.65706915", "0.6519971", "0.64716715", "0.629827", "0.62598455", "0.6244193", "0.62325424", "0.6217279", "0.6208951", "0.61819106", "0.60444623", "0.6036394", "0.60297346", "0.602797", "0.60179985", "0.6007097", "0.6006026", "0.6002901", "0.5990403", "0.5975867", "0.59650207", "0.5960061" ]
0.7815618
1
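To make the exponential figure above concrete, a small illustrative helper (an assumption for demonstration, not taken from the original source) can count the calls made by the same doubling recursion:

def climbing_stairs_calls(n, counter):
    # Mirrors the naive recursion above, but tallies every call.
    counter[0] += 1
    if n < 2:
        return 1
    if n == 2:
        return 2
    return climbing_stairs_calls(n - 1, counter) + climbing_stairs_calls(n - 2, counter)

for n in (10, 20, 25):
    calls = [0]
    climbing_stairs_calls(n, calls)
    print(n, calls[0])  # call counts grow roughly like 1.6**n, i.e. exponentially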
Evaluate the Borehole function on a set of input values.
def evaluate(xx: np.ndarray) -> np.ndarray: # Compute the Borehole function nom = 2 * np.pi * xx[:, 2] * (xx[:, 3] - xx[:, 5]) denom_1 = np.log(xx[:, 1] / xx[:, 0]) denom_2 = ( 2 * xx[:, 6] * xx[:, 2] / (np.log(xx[:, 1] / xx[:, 0]) * xx[:, 0] ** 2 * xx[:, 7]) ) denom_3 = xx[:, 2] / xx[:, 4] yy = nom / (denom_1 * (1 + denom_2 + denom_3)) return yy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _computeValueFunction(self, nbDims, low, high, retstep=False):\n # algorithms performing in discrete space will have a discrete\n # value function that cannot be evaluated at any point - only on the\n # ones for which they have been setup based on the problem it has been\n # setup to solve\n def __round(vec):\n return tuple(int(x) for x in vec)\n\n def __notround(vec):\n return vec\n\n _round = __notround\n if self._algo.DOMAIN['state'] == Spaces.Discrete:\n _round = __round\n\n allParams, stepSizes = self._discretizer.discretize(retstep=True)\n\n allActions = self._problem.getActionsList()\n reducer = max if self.reducer == 'max' else mean\n\n # returns a list\n data = [\n utils.extends({\n key: state[k]\n for k, key in enumerate(self.getKeys(nbDims))\n }, z=reducer([\n self._algo.actionValue(_round(state), action)\n for action in allActions]))\n for state in allParams\n ]\n if retstep:\n return data, stepSizes\n return data", "def bayes(j, x, p, q, r):\n tmp = []\n P = [0.653, 0.347]\n c = 2\n for k in range(c):\n res = conditional(x, k, p, q, r) * P[k]\n tmp.append(res)\n num = conditional(x, j, p, q, r) * P[j] * 1.0\n denom = sum(tmp)\n bt = num / denom\n return bt", "def EvaluateFunction(self, p_float=..., p_float=..., p_float=...):\n ...", "def evaluate_function(self, trajectory):\n objective_values_by_tag = self.evaluate_function_by_objective(trajectory)\n objective_function_values = 0.\n objective_distance_to_goal = 0.\n objective_function_values_init = 0.\n\n reachability_cost = False\n\n # Judge if we are using reachability cost\n for tag, objective_values in objective_values_by_tag:\n if tag == 'reach_avoid_4d':\n reachability_cost = True\n\n # No freezing cost!\n if reachability_cost:\n for tag, objective_values in objective_values_by_tag:\n if tag == 'reach_avoid_4d' or 'avoid_4d':\n objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n else:\n for tag, objective_values in objective_values_by_tag:\n objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n # ## Freeze the sum of 2 costs, at the minimum of the sum of cost\n # if reachability_cost:\n # for tag, objective_values in objective_values_by_tag:\n # if tag == 'reach_avoid_4d' or 'avoid_4d':\n # objective_function_values += objective_values\n # if tag == 'distance_to_goal':\n # objective_distance_to_goal += objective_values\n # try:\n # ## Freeze_v1, freeze at the minimum cost of sum: reach_avoid + avoid\n # # objective_function_values = self._freeze_cost_v1(objective_function_values, objective_distance_to_goal)\n #\n # # Freeze v2, freeze at the minimum cost of only reach_avoid\n # objective_function_values = self._freeze_cost_v2(objective_values_by_tag, objective_function_values, objective_distance_to_goal)\n # objective_function_values = self._reduce_objective_values(trajectory, objective_function_values)\n # except ValueError:\n # print(\"cannot freeze in total cost\")\n # objective_function_values = self._reduce_objective_values(trajectory, objective_function_values)\n # else:\n # for tag, objective_values in objective_values_by_tag:\n # objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n # ## Freeze the sum of 2 costs, at the minimum of the reach_avoid cost\n # if reachability_cost:\n # for tag, objective_values in objective_values_by_tag:\n # if tag == 'reach_avoid_4d' or 'avoid_4d':\n # objective_function_values = self._freeze_cost_v2(objective_values, objective_distance_to_goal)\n # 
objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n # else:\n # for tag, objective_values in objective_values_by_tag:\n # objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n return objective_function_values", "def approching_blackhole():\n blackhole = BlackHole()\n Rs = 8.0\n D_list = np.round(10**np.linspace(np.log10(50), np.log10(100000), 30))\n blackhole.open(blackhole.img_name, size=2000)\n\n for D in D_list:\n blackhole.compute(Rs, D)\n blackhole.img_save()", "def at_value(self, *value):\n result = 0\n for x in value:\n i = 0\n for coef in self.arg:\n result = result + coef * (x ** i)\n i = i + 1\n result = -result\n return -result", "def evaluate(self, signal_values: Array) -> Array:\n pass", "def evaluate(self, blackboard):\n pass", "def f(m, x, b):\n return m*x + b", "def pwBC(self, V, string, homogenize=False, value=None):\n value = self._getvalue(string, value)\n #print \"DEBUG:\", value\n if isinstance(value, dict):\n # TODO: get rid of homogenize (it is only used for PB and is ugly, because assumes scalar pde)\n if homogenize:\n value = {key: Constant(0.) for key in value}\n bcs = []\n for key, val in value.items():\n if val is not None:\n if isinstance(val, GenericFunction):\n #print type(val)\n #print val.__class__\n bcs.append(self.BC(V, val, key))\n elif isinstance(val, float) or isinstance(val, tuple):\n bcs.append(self.BC(V, Constant(val), key))\n else:\n dolfin_error(__name__+\".py\",\n \"assign boundary condition\",\n \"Value on '%s' for the BC '%s' is of unexpected type %s\" % (key, string, type(val)))\n return bcs\n else: # try to assign value on whole boundary\n if homogenize:\n value = Constant(0.)\n if value is None:\n return []\n elif isinstance(value, GenericFunction):\n bc = self.BC(V, value)\n elif isinstance(val, float) or isinstance(val, tuple):\n bc = self.BC(V, Constant(value))\n else:\n dolfin_error(__name__+\".py\",\n \"assign boundary condition\",\n \"Value for the BC '%s' is of unexpected type '%s'.\" % (string, type(val)))\n return [bc]", "def constant_folding(self, *args):\n return self._executor.constant_folding(*args)", "def bealefcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n assert n == 2, \"Beale's function is only defined on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n\n scores = (\n (1.5 - X + (X * Y)) ** 2\n + (2.25 - X + (X * (Y**2))) ** 2\n + (2.625 - X + (X * (Y**3))) ** 2\n )\n\n return scores", "def evaluate(self, **kwargs) -> bool or [bool]:\n try:\n ready, variables, values = self._receive_values(**kwargs)\n answer = list()\n args = self.arguments[0].evaluate(ready=ready, values=(variables, values))\n for a in args:\n answer.append(self.function(a))\n return answer\n except ValueError:\n return self.function(self.arguments[0].evaluate())", "def eval_f(f, xs):\n l = []\n for x in xs:\n l.append(f(x))\n return l", "def _call(self, x):\n u = functional.tmp_u_prox\n v = functional.tmp_v_prox\n\n # Running generalized Sinkhorn iterations\n for j in range(functional.niter):\n # Safe-guarded u-update, to avoid divide-by-zero error.\n u_old = u.copy()\n tmp1 = functional.K_op(v)\n if np.min(tmp1) < 1e-30 or np.max(tmp1) > 1e+50:\n print('Numerical instability, truncation in Transport prox (Kv)',\n str(np.min(tmp1)), str(np.max(tmp1)))\n\n tmp = np.fmax(tmp1, 1e-30)\n\n\n u = functional.mu0 / tmp\n if np.min(u) < 1e-30 or np.max(u) > 1e+50:\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n # Safe-guarded v-update, to avoid divide-by-zero error.\n v_old = 
v.copy()\n\n tmp3 = functional.K_op_adjoint(u)\n if np.min(tmp3) < 1e-30 or np.max(tmp3) > 1e+50:\n print('Truncation in Transport prox (KTu)',\n str(np.min(tmp3)), str(np.max(tmp3)))\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n tmp4 = (self.const * tmp3 * np.exp(self.const * x))\n\n if np.min(tmp4) < 1e-30 or np.max(tmp4) > 1e+200:\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n v = np.exp(self.const * x - lambertw_fulfix(tmp4))\n\n v1 = np.exp(self.const * x - scipy.special.lambertw(\n tmp4))\n if (v-v1).norm() > 1e-10:\n print('diff pga ny lambderw omega funciton',\n str((v-v1).norm()))\n print('v (min/max)', str(np.min(v)), str(np.max(v)))\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n # If the updates in both u and v are small, break the loop\n if ((np.log(v)-np.log(v_old)).norm() < 1e-8 and\n (np.log(u)-np.log(u_old)).norm() < 1e-8):\n break\n\n # Store the u and v in the internal temporary variables of the\n # functional\n functional.tmp_u_prox = u\n functional.tmp_v_prox = v\n\n return x - self.sigma * functional.epsilon * np.log(v)", "def bws(x, y, **kwargs):\n\tx.sort()\n\ty.sort()\n\tnpx = np.array(x)\n\tnpy = np.array(y)\n\n\txs = np.unique(npx)\n\tys = np.unique(npy)\n\txys = set(xs).union(set(ys))\n\taxy = np.array(list(xys))\n\taxy.sort()\n\n\tG = np.array([len(axy[np.where(axy <= xi)]) for xi in xs])\n\tH = np.array([len(axy[np.where(axy <= yi)]) for yi in ys])\n\n\tn = len(G)\n\tm = len(H)\n\tfn = float(n)\n\tfm = float(m)\n\n\tN = np.linspace(1,n,num=n)\n\tM = np.linspace(1,m,num=m)\n\n\txt1 = np.power(G - N*(fm + fn)/fn, 2.0)\n\txtt = N/(fn+1.0)\n\txt2 = xtt*(1 - xtt)*(fm * (fm+fn)/fn)\n\tBx = np.sum(xt1/xt2)/fn\n\t\n\tyt1 = np.power(H - M*(fm + fn)/fm, 2.0)\n\tytt = M/(fm+1.0)\n\tyt2 = ytt*(1 - ytt)*(fn * (fm+fn)/fm)\n\tBy = np.sum(yt1/yt2)/fm\n\n\tB = (Bx+By)/2.0\n\n\tprint \"B = \", B\n\t\n\tJ = 3\n\tif \"j\" in kwargs:\n\t\tJ = kwargs[\"j\"]\n\t\n\treturn compute_xi(B, J)", "def part1b_1():\n xs = exampleInput\n backward = submission.computeBackward(simpleCRF, xs)\n for i in xrange(len(xs)):\n grader.requireIsEqual( 1.0, sum( backward[i].values() ) )", "def multiple_eval_for_loops_v1():", "def evaluate(self, board):", "def part1b_0():\n xs = exampleInput\n _, forward = submission.computeForward(simpleCRF, xs)\n for i in xrange(len(xs)):\n grader.requireIsEqual( 1.0, sum( forward[i].values() ) )", "def __call__(self,x):\n\n arr = np.array(x,copy=False,dtype=float)\n return self._filterfunc(arr,*self.parvals)", "def evaluate_model(points, function):\n function_values = np.zeros((len(points), 1))\n for i in range(0, len(points)):\n function_values[i,0] = function(points[i,:])\n return function_values", "def evalfunc(c, flist, x):\n return sum([c[i] * flist[i](x) for i in range(len(flist))])", "def evaluate(self, batch_x, batch_y):\n raise NotImplementedError()", "def boothfcn(x: np.ndarray) -> np.ndarray:\n\n n = x.shape[1]\n assert n == 2, \"Booth's function is only defined on a 2D space.\"\n\n X = x[:, 0]\n Y = x[:, 1]\n\n scores = (X + 2 * Y - 7) ** 2 + (2 * X + Y - 5) ** 2\n\n return scores", "def evaluate(self, X):\n\n\t\tpass", "def evaluate(self, *args, **kwargs):\n params = self.process_args(args, kwargs)\n a = params['a']\n b = params['b']\n return a * self.x + b", "def evaluate(self, input):\n\t\treturn self.function(np.dot(self.weights, np.array([-1] + list(input))))", "def multiple_eval_for_loops_v2():", "def eval_func(individual):\n \n tiled = 
np.tile(individual, (tile_factor, tile_factor))\n return calculate_force_on_sample(tiled, lam_frac_=lambda_factor)" ]
[ "0.5320009", "0.5202257", "0.5186564", "0.5178047", "0.5152033", "0.5131482", "0.5117862", "0.5089781", "0.50886065", "0.506275", "0.5042652", "0.5041347", "0.50235534", "0.50224406", "0.50089675", "0.4991437", "0.49813336", "0.49782556", "0.4977974", "0.4956625", "0.4945631", "0.4938334", "0.49292892", "0.49179026", "0.49123064", "0.49069476", "0.49046135", "0.4887199", "0.48773643", "0.48649344" ]
0.6051596
0
Returns the weak classifier that produces the least error for a given training set
def _fetch_best_weak_classifier(self, weighted_patches): min_error = 2. print "Training and measuring error for %d classifiers" % len(self.all_classifiers), dec = .05 i = 0 for wc in self.all_classifiers: i += 1 wc.train(weighted_patches) if wc.error < min_error: min_error = wc.error ret = wc if i > dec * len(self.all_classifiers): dec += .05 print ".", print "[DONE]" return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainWeakClassifier(trainingSamples, weights, feature):\n #compute feature values\n featureValues = []\n positiveOrNegative = []\n for sample in trainingSamples:\n featureValues.append(feature.computeScore(sample[0], 0, 0))\n positiveOrNegative.append(sample[1])\n \n #zip with weights and sort by feature value\n featureValues = zip(featureValues, weights, positiveOrNegative)\n featureValues = sorted(featureValues, key=lambda tup: tup[0])\n \n #sum all weights of the positive and negative samples\n negativeWeightsTotal = 0\n positiveWeightsTotal = 0\n for value in featureValues:\n if value[2] == 1:\n positiveWeightsTotal += value[1]\n else:\n negativeWeightsTotal += value[1]\n \n #find the feature with the smallest error\n bestFeatureIndex = 0\n bestFeatureError = 1e10\n negativeWeightsSoFar = 0\n positiveWeightsSoFar = 0\n positiveOnTheLeft = 0\n positivesTotal = 0\n for i in range(0, len(featureValues)):\n error1 = positiveWeightsSoFar-negativeWeightsSoFar+negativeWeightsTotal\n error2 = negativeWeightsSoFar-positiveWeightsSoFar+positiveWeightsTotal\n error = min([error1, error2])\n \n if bestFeatureError > error:\n bestFeatureError = error\n bestFeatureIndex = i\n positiveOnTheLeft = positivesTotal\n \n if featureValues[i][2] == 1:\n positiveWeightsSoFar += featureValues[i][1]\n positivesTotal += 1\n else:\n negativeWeightsSoFar += featureValues[i][1]\n \n #count how much samples are there on the right\n positiveOnTheRight = positivesTotal - positiveOnTheLeft\n \n #determine the polarity and threshold\n polarity = -1\n threshold = featureValues[bestFeatureIndex][0]\n if positiveOnTheLeft > positiveOnTheRight:\n polarity = 1\n else:\n polarity = -1\n \n #build and return a weak classifier\n return WeakClassifier(feature, threshold, polarity)", "def find_best_classifier(data, possible_classifiers, target_classifier):\n best_disorder_score = 10000000\n best_classifier = None\n try:\n for classifier in possible_classifiers:\n total_disorder = average_test_disorder(data, classifier, target_classifier)\n if total_disorder < best_disorder_score:\n best_classifier = classifier\n best_disorder_score = total_disorder\n else:\n pass\n if best_classifier!=None:\n branches = split_on_classifier(data, best_classifier)\n if len(branches) == 1:\n raise NoGoodClassifiersError\n else:\n return best_classifier\n except Exception as e:\n raise NoGoodClassifiersError", "def pick_best_classifier(classifier_to_error_rate, use_smallest_error=True):\n best_classifier = None\n if use_smallest_error:\n best_classifier = min(classifier_to_error_rate, key=classifier_to_error_rate.get)\n else:\n best_classifier = max(classifier_to_error_rate, key=lambda x : abs(classifier_to_error_rate[x]-0.5))\n\n if make_fraction(classifier_to_error_rate[best_classifier]) == make_fraction(1,2):\n raise NoGoodClassifiersError\n\n #find a classifier that comes before this one alphabetically\n for c in classifier_to_error_rate:\n if use_smallest_error and classifier_to_error_rate[c] == classifier_to_error_rate[best_classifier]:\n if c < best_classifier:\n best_classifier = c\n if not use_smallest_error:\n error = make_fraction(abs(classifier_to_error_rate[best_classifier] - 0.5))\n check_error = make_fraction(abs(classifier_to_error_rate[c] -0.5))\n if error == check_error:\n if c < best_classifier:\n best_classifier = c\n return best_classifier", "def _classifier(self, test_set):\r\n return self._mahalanobis_classifier(test_set.features, self.targets)", "def clacError(classifier,weightedSet):\n\terror = 0\n\tfor ex in 
weightedSet:\n\t\tif classifier.test(ex) != ex.label:\n\t\t\terror += ex.weight\n\treturn error", "def _classifier(self, test_set):\r\n return self._euclidian_classifier(test_set.features, test_set.targets)", "def classifier_set(tuning=False):\n\tif tuning==False:\n\t\tclassifiers = [\n\t\t\tKNeighborsClassifier(50),\n\t\t\tSVC(kernel=\"linear\", C=0.025, probability=True),\n\t\t\tSVC(gamma=1, C=1, probability=True),\n\t\t\tGaussianProcessClassifier(1.0 * RBF(1.0)),\n\t\t\tDecisionTreeClassifier(criterion= 'entropy', min_samples_leaf= 30, min_samples_split= 10, splitter= 'random'),\n\t\t\tRandomForestClassifier(n_estimators=50, min_samples_leaf=30, min_samples_split=2),\n\t\t\tMLPClassifier(early_stopping=True, hidden_layer_sizes=100,learning_rate_init=0.1),\n\t\t\tAdaBoostClassifier(n_estimators= 50),\n\t\t\tGaussianNB(),\n\t\t\tLogisticRegression()\n\t\t\t]\n\t\tnames = [\"KNN\",\n\t\t\t \t\"L SVM\",\n\t\t\t \t\"RBF SVM\", \n\t\t\t \t\"GP\",\n\t\t\t\t\"DT\",\n\t\t\t\t\"RF\",\n\t\t\t\t\"NN\", \n\t\t\t\t\"AB\",\n\t\t\t\t\"NB\",\n\t\t\t\t\"LR\"\n\t\t\t\t]\n\treturn classifiers, names", "def run_weak_classifier(x: np.ndarray, c: svm.SVC) -> int:\n x = x.reshape((1, 36))\n return 1 if c.predict(x)[0] == 1 else 0", "def _validateClassification(self, trainingSet):\n wrongCount = 0.\n\n pv = []\n tv = []\n\n if self.K == 1:\n for example in trainingSet:\n Y = self.test(example)\n \n givenClass = example.label[0]\n if Y[0] < 0.5:\n chosenClass = 0\n else:\n chosenClass = 1\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \n if chosenClass != givenClass:\n wrongCount += 1.\n else:\n for example in trainingSet:\n Y = self.test(example)\n \n posterior, chosenClass = max((x, i) for i, x in enumerate(Y))\n max_val, givenClass = max((x, i) for i, x in enumerate(example.label))\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \t\t\t\n if chosenClass != givenClass:\n wrongCount += 1.\n \n return wrongCount/len(trainingSet), pv, tv", "def get_overall_misclassifications(H, training_points, classifier_to_misclassified):\n misclassified = []\n\n for p in training_points:\n score = 0\n for tup in H:\n c = tup[0]\n voting_power = tup[1]\n if p in classifier_to_misclassified[c]:\n score -= voting_power\n else:\n score += voting_power\n if score <= 0:\n misclassified.append(p)\n return set(misclassified)", "def get_best_model(x_train, y_train):\n # calculate class weights\n class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train),\n y_train)\n # convert to dict\n class_weights = dict(enumerate(class_weights))\n # encode label data\n y_train = to_categorical(y_train)\n\n return get_model(x_train, y_train, 256, 3, 'adamax', 'normal', class_weights)", "def choose_best_feature(data_set):\n feature_size = len(data_set[0]) - 1\n base_entropy = calc_entropy(data_set)\n best_info_gain = 0.0; best_feature = -1\n for i in xrange(feature_size):\n feat_list = [eg[i] for eg in data_set]\n unique_values = set(feat_list)\n new_entropy = 0.0\n for value in unique_values:\n sub_ds = splite_dataset(data_set, i, value)\n prob = len(sub_ds) / float(len(data_set))\n new_entropy += prob * calc_entropy(sub_ds)\n info_gain = base_entropy - new_entropy\n if info_gain > best_info_gain:\n best_info_gain = info_gain\n best_feature = i\n\n return best_feature", "def predict(self, test_set_):\n\n test_set = np.array(test_set_)\n n_samples, n_features = test_set.shape\n\n assert n_features == self.features\n\n single_feature = test_set[:, self.bestn]\n h = np.ones((n_samples))\n h[single_feature*self.bestd < 
self.bestp*self.bestd] = -1\n return h", "def MostCommonClassBaseline(training_set, test_set):\n wordtagcounts = {}\n\n for sentence in training_set:\n for (word, tag) in sentence:\n if word not in wordtagcounts:\n wordtagcounts[word] = {tag:1}\n elif tag in wordtagcounts[word]:\n wordtagcounts[word][tag] = wordtagcounts[word][tag] + 1\n elif tag not in wordtagcounts[word]:\n wordtagcounts[word][tag] = 1\n \n result_set = []\n for sentence in test_set:\n toadd = []\n for i in range(len(sentence) ):\n toadd.append( (sentence[i][0], max( wordtagcounts[ sentence[i][0] ], key=wordtagcounts[sentence[i][0] ].get ) ) )\n result_set.append( toadd )\n return result_set", "def misclassified_training_points(svm):\n wrong = []\n for point in svm.training_points:\n if point.classification is not classify(svm, point):\n wrong.append(point)\n return set(wrong)", "def classify(trainX, trainY, testX, testY):\n trainC = getClasses(trainY)\n P = estimatePosterior(trainX, trainC, testX)\n E = fit(testX, P)\n (e_rate, se, interval) = error.confidenceInterval(testY, E)\n return (P, E, e_rate, se, interval)", "def train_knn(training_data):\n return knnclassifier(training_data, keys, 3)", "def h_t(self, x, t):\n ret = 0\n strong_classifier = self.classifiers[0:t+1]\n for wc in strong_classifier:\n ret += wc.classify(x)\n return ret", "def h_t(self, x, t):\n ret = 0\n strong_classifier = self.classifiers[0:t+1]\n for wc in strong_classifier:\n ret += wc.classify(x)\n return ret", "def cross_validate(k, original_x_train, original_y_train, label, features: List[str],\n features_to_encode: List[str], balance_ratio: float,\n classifiers: List[ClassifierMixin]) \\\n -> Tuple[List[Tuple[Type[ClassifierMixin], Dict]], Type[ClassifierMixin]]:\n\n X, y = balance_train(original_x_train, original_y_train, label, balance_ratio)\n skf = StratifiedKFold(n_splits=k)\n val_scores = []\n for classifier in classifiers:\n print('Doing ', classifier.__class__)\n clf = make_pipeline(FeatureEncoder(features_to_encode, features), classifier)\n val_scores.append((classifier.__class__,\n model_selection.cross_validate(clf, X, y, scoring=('f1_weighted'),\n cv=skf, n_jobs=-1)))\n best_classifier_class = max([(mod, median(res['test_score'])) for mod, res in val_scores],\n key=lambda x: x[1])[0]\n return val_scores, best_classifier_class", "def find_best_classifier(classifiers, X_t, y_t, X_v, y_v, params, jobs):\n\n # Initialize result storage\n clfs_return = []\n train_scores = []\n test_scores = []\n\n # Loop through classifiers\n for classifier in classifiers:\n # Grid search, calibrate, and test the classifier\n classifier, train_score, test_score = train_calibrate_predict(\n classifier, X_t, y_t, X_v, y_v, params[classifier], jobs)\n\n # Append the result to storage\n clfs_return.append(classifier)\n train_scores.append(train_score)\n test_scores.append(test_score)\n\n # Return storage\n return clfs_return, train_scores, test_scores", "def train(self, X_, y_, W_):\n\n X = np.array(X_)\n y = np.array(y_)\n W = np.array(W_)\n steps = self.steps\n\n n_samples, n_features = X.shape\n assert n_samples == y.size\n\n bestn = 0\n bestd = 1\n bestp = 0\n minerr = W.sum()\n for n in range(n_features):\n err, d, p = self._optimize(X[:, n], y, W, steps)\n if err < minerr:\n minerr = err\n bestn = n\n bestd = d\n bestp = p\n \n self.features = n_features\n self.bestn = bestn\n self.bestd = bestd\n self.bestp = bestp\n\n return minerr", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", 
"def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier" ]
[ "0.67219883", "0.6134577", "0.6098932", "0.6068661", "0.6026129", "0.60093874", "0.5770285", "0.5695005", "0.56644756", "0.56505865", "0.5635311", "0.5626304", "0.5611976", "0.55347", "0.5491511", "0.54789203", "0.54689837", "0.54498893", "0.54498893", "0.54152846", "0.5410882", "0.5404726", "0.54002464", "0.54002464", "0.54002464", "0.54002464", "0.54002464", "0.54002464", "0.54002464", "0.54002464" ]
0.7245201
0
H_t(x) returns the summation of the responses of the first t weak classifiers.
def h_t(self, x, t): ret = 0 strong_classifier = self.classifiers[0:t+1] for wc in strong_classifier: ret += wc.classify(x) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one_step(self, x, h):\n concatHX = torch.cat((x, h), 1)\n ft = self.sigmoid(self.linearF(concatHX))\n it = self.sigmoid(self.linearI(concatHX))\n newCt = ft*self.ct.clone() + it*self.tanh(self.linearC(concatHX))\n #self.ct = ft*self.ct.clone() + it*self.tanh(self.linearC(concatHX))\n ot = self.sigmoid(self.linearO(concatHX))\n ht = ot*self.tanh(newCt)\n self.ct = newCt\n \n return ht", "def one_step(self, x, h):\n concatHX = torch.cat((x, h), 1)\n zt = self.sigmoid(self.linearZ(concatHX))\n rt = self.sigmoid(self.linearR(concatHX))\n ht = (1-zt)*h + zt* self.tanh(self.linearH(rt*concatHX))\n return ht", "def one_step(self, x, h):\n concatHX = torch.cat((x, h), 1)\n ft = self.sigmoid(self.linearF(concatHX))\n it = self.sigmoid(self.linearI(concatHX))\n ct = ft*self.cts[-1] + it*self.tanh(self.linearC(concatHX))\n #self.ct = ft*self.ct.clone() + it*self.tanh(self.linearC(concatHX))\n ot = self.sigmoid(self.linearO(concatHX))\n ht = ot*self.tanh(ct)\n \n self.cts.append(ct)\n \n return ht", "def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = x # default output is all states\n \n return y", "def forward(self, h_prev, x_t):\n hidden_con = np.concatenate((h_prev.T, x_t.T), axis=0)\n h_next = np.tanh((np.matmul(hidden_con.T, self.Wh)) + self.bh)\n y = self.softmax((np.matmul(h_next, self.Wy)) + self.by)\n return h_next, y", "def forward(self, h_prev, x_t):\n h_concat_x = np.concatenate((h_prev.T, x_t.T), axis=0)\n h_next = np.tanh((np.matmul(h_concat_x.T, self.Wh)) + self.bh)\n y = self.softmax((np.matmul(h_next, self.Wy)) + self.by)\n\n return h_next, y", "def forward(self, h_prev, x_t):\n from scipy.special import softmax\n # softmax(arr, axis=0)\n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n cat = np.concatenate((h_prev, x_t), axis=1)\n # print('meow', cat.shape)\n h_next = np.tanh(cat @ self.Wh + self.bh)\n y = self.softmax(h_next @ self.Wy + self.by)\n return h_next, y\n\n\n\n\n\n\n\n\n\n\n\n m, i = x_t.shape\n U = self.Wh[:i]\n W = self.Wh[i:]\n x = x_t\n T = len(x_t)\n # During forward propagation we save all hidden states in s because need them later.\n # We add one additional element for the initial hidden, which we set to 0\n s = np.zeros((T + 1, len(self.Wh[:self.Wh.shape[1]]) ))\n s[-1] = np.zeros(self.Wh.shape[1])\n # The outputs at each time step. Again, we save them for later.\n o = np.zeros((T, len(self.Wh[:self.Wh.shape[1]])))\n # For each time step...\n for t in np.arange(T):\n # Note that we are indxing U by x[t]. 
This is the same as multiplying U with a one-hot vector.\n #s[t] = np.tanh(U[:, x_t[]] + W.dot(s[t - 1]))\n o[t] = softmax(self.V.dot(s[t]))\n return s, o\n \n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n print(\"wi\", Wi.shape, \"wh\", Wh.shape)\n print(\"wh\", self.Wh.shape, \"wy\", self.Wy.shape)\n print(\"bh\", self.bh.shape, \"by\", self.by.shape)\n print(\"xtshape\", x_t.shape, \"hprev\", h_prev.shape)\n print(\"one\", self.Wh[:i].shape)\n one = self.Wy.dot(x_t)# np.dot(x_t, Wh) # x_t.dot(self.Wh[:i])\n two = h_prev @ Wh # h_prev.dot(self.Wh[i:])\n sum = one + two\n h_next = np.tanh(sum + self.bh)\n soft = h_next @ self.Wy\n y = self.softmax(soft) # + self.by)\n return h_next, y", "def h(X, theta, n_hidden_layers=1):\n _, a = feed_forward(X, theta, n_hidden_layers)\n L = n_hidden_layers + 1 # last layer\n\n hypothesis = a[L]\n return hypothesis", "def forward(self, h_prev, x_t):\r\n m, i = x_t.shape\r\n _, h = h_prev.shape\r\n x_ht = np.hstack((h_prev, x_t))\r\n h_next = np.tanh(np.matmul(x_ht, self.Wh) + self.bh)\r\n y_n = np.matmul(h_next, self.Wy) + self.by\r\n y = self.softmax(y_n)\r\n return (h_next, y)", "def ifht(x_hat, N):\n y = (1.0/N)*fht(x_hat,N)\n return y", "def rnn_one_step(x_t, h_t):\n\n # Convert character id into embedding.\n x_t_emb = embed_x(tf.reshape(x_t,[-1,1]))[:,0]\n \n # Concatenate x embedding and previous h state.\n x_and_h = concatenate([x_t_emb, h_t])\n \n # Compute next state given x_and_h.\n h_next = get_h_next(x_and_h)\n #print(h_next.get_shape().as_list())\n \n # Get probabilities for language model P(x_next|h_next).\n output_probas = get_probas(h_next)\n \n return output_probas,h_next", "def forward(self, y, h):\n y = y.transpose(1, 0)\n\n t = self.linear_in(h)\n target = self.linear_in(h).unsqueeze(2) # batch x dim x 1\n\n # Get attention\n attn = torch.bmm(y, target).squeeze(2) # batch x T\n attn = F.softmax(attn, dim=1)\n attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x T\n\n weighted_y = torch.bmm(attn3, y).squeeze(1) # batch x dim\n h_tilde = torch.cat((weighted_y, h), 1)\n\n h_tilde = torch.tanh(self.linear_out(h_tilde))\n\n return h_tilde, attn", "def get_h0(self, t):\n return self.h0 * np.sin(2 * np.pi * t / self.Pmod + self.Pmod_phi)", "def definitive_hotT_method():\n\tT, Xs = load_manticore(soln_2p_plus046, frames=(1, 5))\n\thist_limits = (10, 20)\n\thistxy, stats = gen_hist_and_stats(T,\n\t\t(mtc.get_pacs_mask() & (Xs < 1)),\n\t\tx_lim=hist_limits,\n\t\tsetting=-2\n\t)\n\thot_T = stats[0]\n\terr_hotT = stats[1]\n\treturn hot_T, err_hotT", "def h_T(self, z0):\n # Get the governing variables\n (B, N, u_slip, u_inf) = self.get_variables(z0, 0.)\n \n # Compute U_N\n U_N = u_slip / (B * N)**(1./4.)\n \n # Compute the correlation equation\n return 2.9 * np.exp(-(U_N - 1.0)**2 / 28.09) * (B / N**3)**(1./4.)", "def one_step(i_t, h_tm1):\n h_t = self.activation(T.dot(i_t, self.W) + T.dot(h_tm1, self.W_rec) + self.b)\n return h_t", "def activation(h):\n\n if(h > 0):\n return 1\n\n else:\n return 0", "def forward_pass(self, h_tm1, x_t): # Function though to be used by tf.scan\n\n # Convert vector-tensor form into matrix-tensor form\n x_t = tf.reshape(x_t, shape=[1, -1])\n h_tm1 = tf.reshape(h_tm1, shape=[1, -1])\n\n # Definitions of z_t and r_t\n z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)\n r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)\n\n # Definition of h~_t\n h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + 
self.bh)\n\n # Compute the next hidden state\n h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)\n\n return tf.squeeze(h_t)", "def __call__(self, h):\n\n Wh = self.W(h)\n p_yt = F.log_softmax(Wh) # should be (B x V)\n\n return p_yt", "def sigmoid(t):\n sig=np.exp(t)/(1+np.exp(t))\n return sig", "def get_h_given_v_samples(self, x):\n \n sig_input = T.dot(x, self.W) + self.bhid\n \n sig_output= T.nnet.sigmoid(sig_input)\n \n sample = self.theano_rand_gen.binomial(size= sig_output.shape,\n n=1, \n p= sig_output,\n dtype=theano.config.floatX)\n \n return [sig_input, sig_output, sample]", "def h(x):\n h = -x*np.math.log(x, 2) - (1 - x)*np.math.log(1 - x, 2)\n return h", "def get_h0(self, t):\n return self.h0", "def cost_h(self, node, xg):\n h = (node.state - xg).T @ self.Q @ (node.state - xg)\n return float(h)", "def sigmoid(t):\n return np.exp(t)/(1+np.exp(t))", "def predict_boosting_example(x, h_ens):\r\n\r\n arr = []\r\n sum_alpha = 0\r\n\r\n for y in h_ens:\r\n # splitting hypothesis, weight pairs\r\n alpha, tree = h_ens[y]\r\n tst_pred = predict_example(x, tree)\r\n # appending prediction\r\n arr.append(tst_pred*alpha)\r\n sum_alpha += alpha\r\n predict_egz = np.sum(arr) / sum_alpha\r\n # weak learner\r\n if predict_egz >= 0.5:\r\n return 1\r\n else:\r\n return 0", "def sigmoid(t):\n return 1 / (1 + np.exp(-t))", "def sigmoid(t):\n\n return 1.0 / (1.0 + np.exp(-t))", "def response(x, t):\n xdot = [\"\", \"\"] # first and second derivative of x\n e = 10 # epsilon\n xdot[0] = x[0]\n xdot[1] = x[0] /(t[1]- t[0]) \n return xdot[1] - e(1 - x[0]**2) * xdot[0] + x[0]", "def H(t, args):\n\n f0 = args['f0']\n n = args['n']\n omega = args['omega']\n omegaDt = args['omegaDt']\n omegaArgs = args['omegaArgs']\n\n ad = create(n)\n a = destroy(n)\n # H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation\n ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))\n # additional term because of w(t) not constant\n ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)\n # Force term (9**10^-9 = x0, extent of ground state wave function), see Wittmann diss\n # with compensation term -f0/w0^2 (e.g. no force in the case of no modulation)\n ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)\n # ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)\n return(ham)" ]
[ "0.68166625", "0.67978925", "0.6778078", "0.6264831", "0.61125785", "0.60929614", "0.60755724", "0.6063902", "0.600523", "0.59320676", "0.5920946", "0.5919724", "0.58609223", "0.58361953", "0.5825794", "0.58144814", "0.579117", "0.57640105", "0.57490045", "0.5742261", "0.5742003", "0.5734985", "0.5728514", "0.57280314", "0.5727666", "0.5712966", "0.56938237", "0.56708175", "0.56579804", "0.564968" ]
0.74814725
0
H_t(x) returns the summation of the responses of the first t weak classifiers.
def h_t(self, x, t): ret = 0 strong_classifier = self.classifiers[0:t+1] for wc in strong_classifier: ret += wc.classify(x) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one_step(self, x, h):\n concatHX = torch.cat((x, h), 1)\n ft = self.sigmoid(self.linearF(concatHX))\n it = self.sigmoid(self.linearI(concatHX))\n newCt = ft*self.ct.clone() + it*self.tanh(self.linearC(concatHX))\n #self.ct = ft*self.ct.clone() + it*self.tanh(self.linearC(concatHX))\n ot = self.sigmoid(self.linearO(concatHX))\n ht = ot*self.tanh(newCt)\n self.ct = newCt\n \n return ht", "def one_step(self, x, h):\n concatHX = torch.cat((x, h), 1)\n zt = self.sigmoid(self.linearZ(concatHX))\n rt = self.sigmoid(self.linearR(concatHX))\n ht = (1-zt)*h + zt* self.tanh(self.linearH(rt*concatHX))\n return ht", "def one_step(self, x, h):\n concatHX = torch.cat((x, h), 1)\n ft = self.sigmoid(self.linearF(concatHX))\n it = self.sigmoid(self.linearI(concatHX))\n ct = ft*self.cts[-1] + it*self.tanh(self.linearC(concatHX))\n #self.ct = ft*self.ct.clone() + it*self.tanh(self.linearC(concatHX))\n ot = self.sigmoid(self.linearO(concatHX))\n ht = ot*self.tanh(ct)\n \n self.cts.append(ct)\n \n return ht", "def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = x # default output is all states\n \n return y", "def forward(self, h_prev, x_t):\n hidden_con = np.concatenate((h_prev.T, x_t.T), axis=0)\n h_next = np.tanh((np.matmul(hidden_con.T, self.Wh)) + self.bh)\n y = self.softmax((np.matmul(h_next, self.Wy)) + self.by)\n return h_next, y", "def forward(self, h_prev, x_t):\n h_concat_x = np.concatenate((h_prev.T, x_t.T), axis=0)\n h_next = np.tanh((np.matmul(h_concat_x.T, self.Wh)) + self.bh)\n y = self.softmax((np.matmul(h_next, self.Wy)) + self.by)\n\n return h_next, y", "def forward(self, h_prev, x_t):\n from scipy.special import softmax\n # softmax(arr, axis=0)\n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n cat = np.concatenate((h_prev, x_t), axis=1)\n # print('meow', cat.shape)\n h_next = np.tanh(cat @ self.Wh + self.bh)\n y = self.softmax(h_next @ self.Wy + self.by)\n return h_next, y\n\n\n\n\n\n\n\n\n\n\n\n m, i = x_t.shape\n U = self.Wh[:i]\n W = self.Wh[i:]\n x = x_t\n T = len(x_t)\n # During forward propagation we save all hidden states in s because need them later.\n # We add one additional element for the initial hidden, which we set to 0\n s = np.zeros((T + 1, len(self.Wh[:self.Wh.shape[1]]) ))\n s[-1] = np.zeros(self.Wh.shape[1])\n # The outputs at each time step. Again, we save them for later.\n o = np.zeros((T, len(self.Wh[:self.Wh.shape[1]])))\n # For each time step...\n for t in np.arange(T):\n # Note that we are indxing U by x[t]. 
This is the same as multiplying U with a one-hot vector.\n #s[t] = np.tanh(U[:, x_t[]] + W.dot(s[t - 1]))\n o[t] = softmax(self.V.dot(s[t]))\n return s, o\n \n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n print(\"wi\", Wi.shape, \"wh\", Wh.shape)\n print(\"wh\", self.Wh.shape, \"wy\", self.Wy.shape)\n print(\"bh\", self.bh.shape, \"by\", self.by.shape)\n print(\"xtshape\", x_t.shape, \"hprev\", h_prev.shape)\n print(\"one\", self.Wh[:i].shape)\n one = self.Wy.dot(x_t)# np.dot(x_t, Wh) # x_t.dot(self.Wh[:i])\n two = h_prev @ Wh # h_prev.dot(self.Wh[i:])\n sum = one + two\n h_next = np.tanh(sum + self.bh)\n soft = h_next @ self.Wy\n y = self.softmax(soft) # + self.by)\n return h_next, y", "def h(X, theta, n_hidden_layers=1):\n _, a = feed_forward(X, theta, n_hidden_layers)\n L = n_hidden_layers + 1 # last layer\n\n hypothesis = a[L]\n return hypothesis", "def forward(self, h_prev, x_t):\r\n m, i = x_t.shape\r\n _, h = h_prev.shape\r\n x_ht = np.hstack((h_prev, x_t))\r\n h_next = np.tanh(np.matmul(x_ht, self.Wh) + self.bh)\r\n y_n = np.matmul(h_next, self.Wy) + self.by\r\n y = self.softmax(y_n)\r\n return (h_next, y)", "def ifht(x_hat, N):\n y = (1.0/N)*fht(x_hat,N)\n return y", "def rnn_one_step(x_t, h_t):\n\n # Convert character id into embedding.\n x_t_emb = embed_x(tf.reshape(x_t,[-1,1]))[:,0]\n \n # Concatenate x embedding and previous h state.\n x_and_h = concatenate([x_t_emb, h_t])\n \n # Compute next state given x_and_h.\n h_next = get_h_next(x_and_h)\n #print(h_next.get_shape().as_list())\n \n # Get probabilities for language model P(x_next|h_next).\n output_probas = get_probas(h_next)\n \n return output_probas,h_next", "def forward(self, y, h):\n y = y.transpose(1, 0)\n\n t = self.linear_in(h)\n target = self.linear_in(h).unsqueeze(2) # batch x dim x 1\n\n # Get attention\n attn = torch.bmm(y, target).squeeze(2) # batch x T\n attn = F.softmax(attn, dim=1)\n attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x T\n\n weighted_y = torch.bmm(attn3, y).squeeze(1) # batch x dim\n h_tilde = torch.cat((weighted_y, h), 1)\n\n h_tilde = torch.tanh(self.linear_out(h_tilde))\n\n return h_tilde, attn", "def get_h0(self, t):\n return self.h0 * np.sin(2 * np.pi * t / self.Pmod + self.Pmod_phi)", "def definitive_hotT_method():\n\tT, Xs = load_manticore(soln_2p_plus046, frames=(1, 5))\n\thist_limits = (10, 20)\n\thistxy, stats = gen_hist_and_stats(T,\n\t\t(mtc.get_pacs_mask() & (Xs < 1)),\n\t\tx_lim=hist_limits,\n\t\tsetting=-2\n\t)\n\thot_T = stats[0]\n\terr_hotT = stats[1]\n\treturn hot_T, err_hotT", "def h_T(self, z0):\n # Get the governing variables\n (B, N, u_slip, u_inf) = self.get_variables(z0, 0.)\n \n # Compute U_N\n U_N = u_slip / (B * N)**(1./4.)\n \n # Compute the correlation equation\n return 2.9 * np.exp(-(U_N - 1.0)**2 / 28.09) * (B / N**3)**(1./4.)", "def one_step(i_t, h_tm1):\n h_t = self.activation(T.dot(i_t, self.W) + T.dot(h_tm1, self.W_rec) + self.b)\n return h_t", "def activation(h):\n\n if(h > 0):\n return 1\n\n else:\n return 0", "def forward_pass(self, h_tm1, x_t): # Function though to be used by tf.scan\n\n # Convert vector-tensor form into matrix-tensor form\n x_t = tf.reshape(x_t, shape=[1, -1])\n h_tm1 = tf.reshape(h_tm1, shape=[1, -1])\n\n # Definitions of z_t and r_t\n z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)\n r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)\n\n # Definition of h~_t\n h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + 
self.bh)\n\n # Compute the next hidden state\n h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)\n\n return tf.squeeze(h_t)", "def __call__(self, h):\n\n Wh = self.W(h)\n p_yt = F.log_softmax(Wh) # should be (B x V)\n\n return p_yt", "def get_h_given_v_samples(self, x):\n \n sig_input = T.dot(x, self.W) + self.bhid\n \n sig_output= T.nnet.sigmoid(sig_input)\n \n sample = self.theano_rand_gen.binomial(size= sig_output.shape,\n n=1, \n p= sig_output,\n dtype=theano.config.floatX)\n \n return [sig_input, sig_output, sample]", "def sigmoid(t):\n sig=np.exp(t)/(1+np.exp(t))\n return sig", "def h(x):\n h = -x*np.math.log(x, 2) - (1 - x)*np.math.log(1 - x, 2)\n return h", "def get_h0(self, t):\n return self.h0", "def cost_h(self, node, xg):\n h = (node.state - xg).T @ self.Q @ (node.state - xg)\n return float(h)", "def sigmoid(t):\n return np.exp(t)/(1+np.exp(t))", "def predict_boosting_example(x, h_ens):\r\n\r\n arr = []\r\n sum_alpha = 0\r\n\r\n for y in h_ens:\r\n # splitting hypothesis, weight pairs\r\n alpha, tree = h_ens[y]\r\n tst_pred = predict_example(x, tree)\r\n # appending prediction\r\n arr.append(tst_pred*alpha)\r\n sum_alpha += alpha\r\n predict_egz = np.sum(arr) / sum_alpha\r\n # weak learner\r\n if predict_egz >= 0.5:\r\n return 1\r\n else:\r\n return 0", "def sigmoid(t):\n return 1 / (1 + np.exp(-t))", "def sigmoid(t):\n\n return 1.0 / (1.0 + np.exp(-t))", "def response(x, t):\n xdot = [\"\", \"\"] # first and second derivative of x\n e = 10 # epsilon\n xdot[0] = x[0]\n xdot[1] = x[0] /(t[1]- t[0]) \n return xdot[1] - e(1 - x[0]**2) * xdot[0] + x[0]", "def H(t, args):\n\n f0 = args['f0']\n n = args['n']\n omega = args['omega']\n omegaDt = args['omegaDt']\n omegaArgs = args['omegaArgs']\n\n ad = create(n)\n a = destroy(n)\n # H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation\n ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))\n # additional term because of w(t) not constant\n ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)\n # Force term (9**10^-9 = x0, extent of ground state wave function), see Wittmann diss\n # with compensation term -f0/w0^2 (e.g. no force in the case of no modulation)\n ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)\n # ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)\n return(ham)" ]
[ "0.6817799", "0.67995626", "0.67790145", "0.62669414", "0.61134875", "0.60938984", "0.6076991", "0.6066603", "0.600625", "0.5934693", "0.59222114", "0.5919796", "0.58628464", "0.58377874", "0.58264107", "0.58153915", "0.579338", "0.5765072", "0.5750654", "0.5745055", "0.57428974", "0.5738613", "0.5730961", "0.5730287", "0.5728649", "0.57146335", "0.56950897", "0.56719065", "0.56601316", "0.5652276" ]
0.74806714
1
Search for a book by ISBN number
def searchbook(isbn): print("Searching for isbn " + isbn + " in googlebooks...") result = _search_by_isbn(isbn) if result["totalItems"] == 0: return None b = _item2book(result["items"][0]) return b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_book_by_isbn(isbn):\n return Book.get_book(isbn)", "def test_search_by_ISBN(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 1)", "def isbn_lookup(isbn):\n base = \"https://www.googleapis.com/books/v1/volumes?q=isbn=\"\n# Unfortunately we can't use the superior \"with spam as eggs\" syntax here...\n search = urlopen(base + isbn + \"&prettyprint=false\")\n lines = search.read()\n search.close()\n for bool_pair in [(\"false\", \"False\"), (\"true\", \"True\")]:\n lines = lines.replace(*bool_pair)\n volume_info = literal_eval(lines)[\"items\"][0][\"volumeInfo\"]\n title = volume_info[\"title\"]\n authors = ', '.join(a for a in volume_info[\"authors\"])\n return \"Title:\\t\\t%s\\nAuthor(s):\\t%s\" % (title, authors)", "def query(isbn):\r\n wq = WEBQuery(SERVICE_URL.format(isbn=isbn))\r\n r = wq.parse_data() if wq.check_data() else None\r\n if r:\r\n return _records(isbn, r)\r\n return r", "def book_by_isbn(ISBN):\n data = {}\n for book in root.findall('Book'):\n for elem in book:\n isbn = book.find('ISBN').text\n if isbn == ISBN:\n data['id'] = book.attrib['id']\n data[elem.tag] = elem.text\n return data", "def search(self, title=\"\", author=\"\", year=\"\", isbn=\"\"):\n self.cursor.execute(\"SELECT * FROM Book WHERE Title = ? OR Author = ? \\\n OR Year = ? OR ISBN = ?\", (title, author, year, isbn))\n rows = self.cursor.fetchall()\n return rows", "def search_book():\n\n title = request.form.get(\"search\")\n books = book_search_results(GR_KEY, title)\n acct = get_current_account(session['acct'])\n search = True\n\n return render_template(\"index.html\", books=books, acct=acct, search=search)", "def test_search_client_by_isbn(self, mock_get):\n\n response = isbn_utils.search_by(self.filter_isbn, self.ISBN)\n self.assertEqual(response.data, json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_ISBN_SEARCH_RESPONSE).read())[\"data\"])", "def search(title = \"\", author = \"\", year = \"\", isbn = \"\"):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"SELECT * \"\n \"FROM book \"\n \"WHERE title = %s OR author = %s OR year = %s OR isbn = %s\", \n (title, author, year, isbn))\n rows = cur_obj.fetchall()\n conn_obj.close()\n return rows", "def parse_isbn_url(url):\n parts = urlparse.urlparse(url)\n query = urlparse.parse_qs(parts.query)\n if ISBN_QS_KEY in query and len(query[ISBN_QS_KEY]):\n return query[ISBN_QS_KEY][0]", "def search():\n\n # TO DO: refine with wildcard to curb superfluous results\n \n # logged in users can search for books\n # via 'isbn', 'author', or 'title'\n query = request.form.get(\"search\")\n if not query:\n return render_template(\"home.html\", result=0, name=session[\"name\"],result_head=\"Results\")\n \n # query 'isbn'\n if query.isdigit():\n res = db.execute(\"SELECT * FROM books WHERE isbn LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n else:\n # query 'author'\n res = db.execute(\"SELECT * FROM books WHERE author LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n # If no result from author, query 'title'\n if len(res) == 0:\n res = db.execute(\"SELECT * FROM books WHERE title LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n if len(res) == 0:\n res = 0\n return render_template(\"home.html\", result=res, name=session[\"name\"], 
result_head=\"Results\")", "def get_isbn(self):\n return self.isbn", "def book_search(self, term):\n\n try:\n cur = self._db.cursor()\n search = f'%{term.upper()}%'\n cur.execute('SELECT rowid, * FROM books WHERE UPPER(title) like ? OR UPPER(author) like ?', (search, search))\n return self._cursor_to_booklist(cur)\n except sqlite3.Error as e:\n raise BookError(f'Error searching for books with search term {term}') from e", "def getISBN(self):\n return self.bookISBN", "def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif choice == '2':\n search_by_chosen_option(library, options[1])\n elif choice == '3':\n search_by_chosen_option(library, options[2])\n elif choice == '4':\n search_by_shelf(library)\n elif choice == '5':\n search_by_chosen_option(library, options[4])\n elif choice == '6':\n search_by_chosen_option(library, options[5])", "def api_book(isbn):\n isbn = Markup.escape(isbn)\n # check if book exist in database\n book_db = db.execute(\n \"SELECT * FROM books WHERE isbn LIKE :isbn\", {\"isbn\": isbn}\n ).fetchone()\n if book_db == None:\n return jsonify({\"error\": \"Invalid isbn or not in our database\"}), 404\n\n # Get detail from Goodreads\n res = requests.get(\n \"https://www.goodreads.com/book/review_counts.json\",\n params={\"key\": os.getenv(\"GOODREADS_API\"), \"isbns\": isbn},\n )\n\n if res.status_code != 200:\n raise Exception(\"ERROR: API request unsuccessful.\")\n data = res.json()\n book = data[\"books\"][0]\n\n # Print results\n return jsonify(\n {\n \"title\": book_db.title,\n \"author\": book_db.author,\n \"year\": book_db.year,\n \"isbn\": book_db.isbn,\n \"review_count\": book[\"work_ratings_count\"],\n \"average_score\": book[\"average_rating\"],\n }\n )", "def book(isbn):\n isbn = Markup.escape(isbn)\n # check if book exist in database\n book_db = db.execute(\n \"SELECT * FROM books WHERE isbn LIKE :isbn\", {\"isbn\": isbn}\n ).fetchone()\n if book_db == None:\n return render_template(\n \"error.html\", error=\"ISBN invalid or not in our Database.\"\n )\n\n # Get detail from Goodreads\n res = requests.get(\n \"https://www.goodreads.com/book/review_counts.json\",\n params={\"key\": os.getenv(\"GOODREADS_API\"), \"isbns\": isbn},\n )\n\n if res.status_code != 200:\n return render_template(\"error.html\", error=\"Not found on our API.\")\n data = res.json()\n book = data[\"books\"][0]\n\n # Get the reviews for the book.\n book_reviews = db.execute(\n \"SELECT review.*, users.nickname FROM review JOIN users ON review.user_id = users.id WHERE book_id = :book_id\",\n {\"book_id\": book_db.id},\n ).fetchall()\n\n # Get my own review\n user = session.get(\"user\")\n my_review = db.execute(\n \"SELECT * FROM review WHERE (book_id = :book_id) AND user_id = (SELECT id from users WHERE username LIKE :user)\",\n {\"book_id\": book_db.id, \"user\": user},\n ).fetchone()\n\n if my_review is not None:\n # Print results\n return render_template(\n \"book.html\",\n book=book,\n book_db=book_db,\n book_reviews=book_reviews,\n my_review=my_review,\n )\n else:\n return render_template(\n \"book.html\",\n book=book,\n book_db=book_db,\n book_reviews=book_reviews,\n my_review=None,\n )", "def get_book_data(isbn: int):\n try:\n book = next(iter(core.Book.search(('isbn', 'eq', isbn))))\n except StopIteration:\n pass # actually, I could put the whole rest 
of the function here\n else:\n data = core.Book.view_str(book.id)\n del data['id'], data['status'], data['return_date'], data['borrowed_by']\n del data['borrowed_by_id'], data['__str__']\n return data\n\n try:\n r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'\n + str(isbn) + '&method=simpleSearch&cqlMode=true')\n r.raise_for_status()\n except requests.exceptions.RequestException:\n raise core.BuchSchlossError('no_connection', 'no_connection')\n\n person_re = re.compile(r'(\\w*, \\w*) \\((\\w*)\\)')\n results = {'concerned_people': []}\n\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n if table is None:\n # see if we got multiple results\n link_to_first = page.select_one('#recordLink_0')\n if link_to_first is None:\n raise core.BuchSchlossError(\n 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)\n r = requests.get('https://portal.dnb.de'+link_to_first['href'])\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n\n for tr in table.select('tr'):\n td = [x.get_text('\\n').strip() for x in tr.select('td')]\n if len(td) == 2:\n if td[0] == 'Titel':\n results['title'] = td[1].split('/')[0].strip()\n elif td[0] == 'Person(en)':\n for p in td[1].split('\\n'):\n g = person_re.search(p)\n if g is None:\n continue\n g = g.groups()\n if g[1] == 'Verfasser':\n results['author'] = g[0]\n else:\n results['concerned_people'].append(g[1]+': '+g[0])\n elif td[0] == 'Verlag':\n results['publisher'] = td[1].split(':')[1].strip()\n elif td[0] == 'Zeitliche Einordnung':\n results['year'] = td[1].split(':')[1].strip()\n elif td[0] == 'Sprache(n)':\n results['language'] = td[1].split(',')[0].split()[0].strip()\n\n results['concerned_people'] = '; '.join(results['concerned_people'])\n return results", "def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False", "def find_book(code: str) -> Dict:\n pass", "def test_search_book_id(self):\n response = self.client.open(\n '/juanbedoya29/ApiRest/1.0.0/books/2',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_single_book_info(self, isbn):\n self.cursor.execute(\"SELECT * FROM book WHERE ISBN=%s\", (isbn,))\n books = self.cursor.fetchall()\n for book in books:\n authors = []\n self.cursor.execute(\"\"\"SELECT name FROM Author A, Wrote W, Book B WHERE A.ID = W.authorID AND\n W.ISBN = B.ISBN AND B.ISBN = %s\"\"\", (isbn,))\n for auth in self.cursor.fetchall():\n authors.append(auth[0])\n return book, authors", "def search_for_books(self, query):\n books = []\n book = Book(self.db)\n for row in self.db.cursor().execute('SELECT genre_id FROM genres WHERE ' + query):\n books.extend(self.get_books(row[0]))\n\n return books", "def search_by_type(book_search):\n\n book_type_translator = {\n '1':'fiction',\n '2':'crime',\n '3':'adventure'\n }\n\n print(\"What type of book are you looking for? 
Enter a number\")\n print(\n \"\\n\".join(f\"{num}.{genre}\" for num, genre in book_type_translator.items()))\n\n book_type_number = 0\n\n while True:\n book_type_number = input('> ')\n\n if book_type_number in book_type_translator:\n book_type = book_type_translator[book_type_number]\n book_printer(book_type)\n elif book_type_number == 'X':\n return\n else:\n print(\"Book type invalid, try again or [X] to exit\")", "def get_isbn_items(query=\"\"):\n url = \"https://archive.org/advancedsearch.php?q=\" + query\n r = requests.get(url)\n isbn_items = r.json()[\"response\"][\"docs\"]\n print(f\"Length of isbn_items: {len(isbn_items)}\")\n return isbn_items", "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)", "def search(self, term):\n term = u'%' + term.decode('utf8') + u'%'\n cursor = self._dbcon.cursor()\n t = (term, term, term)\n sql = u\"\"\"select rowid, * from books where (title like ?) or\n (author like ?) or (filename like ?)\"\"\"\n cursor.execute(sql, t)\n result = cursor.fetchall()\n cursor.close()\n return [self._book_from_query_result(x) for x in result]", "def get_single_books(isbn):\n return_value = Book.get_book(isbn)\n return jsonify(return_value)", "def create_book(self, title, isbn):\n isbn_list = [book.get_isbn() for book in self.books.keys()]\n if isbn in isbn_list:\n print(\"ISBN {isbn} already exists. Please provide a unique ISBN.\".format(isbn=isbn))\n else:\n return Book(title, isbn)" ]
[ "0.79139775", "0.7770317", "0.75252795", "0.69055563", "0.68805486", "0.6762374", "0.6645071", "0.6621116", "0.6558435", "0.65381473", "0.64426", "0.64143974", "0.6401315", "0.63997155", "0.6371291", "0.6358525", "0.6311553", "0.62363183", "0.62286764", "0.6191785", "0.6131164", "0.6121516", "0.6101259", "0.6090152", "0.6080857", "0.60655856", "0.6023662", "0.6020463", "0.6001155", "0.5968251" ]
0.825122
0
Get the second move of the user from keyboard.
def getSecondMove(self, firstMove): while True: try: move = tuple(int(str.strip()) for str in raw_input('Choose your second move: ').split(',')) break except ValueError: print("Input is not a integer.") while len(move) != 2 or abs(move[0]-firstMove[0]) + abs(move[1]-firstMove[1]) != 1: print 'Second move is not valid.' move = tuple(int(str.strip()) for str in raw_input('Choose your second move: ').split(',')) return move
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_move(self):\n return int(input('Enter your move: '))", "def get_next_move(self):\n if self.move == 'X':\n return 'O'\n return 'X'", "def second_move(self):\n self.play_sound(self.second_key)\n self.end_move()", "def getMovement(self):\n keys = ugame.buttons.get_pressed()\n dx = 0\n dy = 0\n # check for arrow keys - NOTE: currently tile collision only supports one direction of movement at a time (no diagonal)\n if keys & ugame.K_RIGHT:\n dx = 4\n elif keys & ugame.K_LEFT:\n dx = -4\n elif keys & ugame.K_UP:\n dy = -4\n elif keys & ugame.K_DOWN:\n dy = 4\n if keys & ugame.K_X:\n self.animations.goToState('attack') # TODO: prevent walking while attacking\n if keys & ugame.K_O:\n text.show()\n # return desired movement\n return dx, dy", "def get_move_from_user(self):\n user_input = input(\"Move: \")\n if user_input == 'undo':\n return user_input\n try:\n move_list = user_input.split(\" \")\n move_list[1] = int(move_list[1])\n except:\n move_list = ['XX', 0, 'XX']\n return move_list", "def get_move() -> str:\n msg = 'Enter a move for that section (C to check, S to swap, R to rotate): '\n move = input(msg)\n while not wf.is_valid_move(move):\n print('Invalid move!')\n move = input(msg) \n return move", "def user_input():\n key = input('Move position or press y to do turn: ')\n if key == 'w' or 's' or 'd' or 'a' or 'y':\n return key\n elif key == 'h':\n print('There is no help yet')\n else:\n print('Need help? Press \"h\"')\n return user_input()", "def processInput(direction):\n userinput = screen.getch()\n if userinput == curses.KEY_UP:\n direction = 3\n elif userinput == curses.KEY_DOWN:\n direction = 2\n elif userinput == curses.KEY_LEFT:\n direction = 1\n elif userinput == curses.KEY_RIGHT:\n direction = 0\n return direction", "def get_input(self):\n try:\n char = self.screen.get_wch()\n return (char, self._key_name(char))\n except KeyboardInterrupt:\n return (-1, \"^C\")\n except:\n return False", "def get_input(self) -> int:\n assert CursesMenu.stdscr is not None\n return CursesMenu.stdscr.getch()", "def getFirstMove(self):\n while True:\n try:\n move = tuple(int(str.strip()) for str in raw_input('Choose your first move: ').split(','))\n break\n except ValueError:\n print(\"Input is not a integer.\")\n\n while move not in [(1, 1), (self.grid.width/2, self.grid.height/2), \\\n (self.grid.width/2+1, self.grid.height/2+1), (self.grid.width, self.grid.height)]:\n print 'First move is not valid.'\n move = tuple(int(str.strip()) for str in raw_input('Choose your first move: ').split(','))\n return move", "def wait_second_move(self):\n self.qr_unregister()\n self.env.keyboard.listen_once(self.catch_key_second, key_down)", "def get_player_move(board_positions, is_position_availible):\n player_input = None\n move = None\n while move not in board_positions.keys() or is_position_availible(move) is False:\n print(\"What is your next move? Input in the form letter + number Ex. 
a3\")\n player_input = input().lower()\n letter = player_input[0] + \" \"\n number = player_input[1:]\n if len(number) < 2:\n number = number + \" \"\n move = letter + number\n return move", "def human_move(self):\n move = -1\n while move < 1 or move > self.BOARD.COLUMNS:\n try:\n move = input(\"{}: Choose a column>>> \".format(self.NAME))\n\n for i in self.QUIT:\n if str(move) == i:\n return None\n\n move = int(move)\n\n except KeyboardInterrupt:\n exit(0)\n except ValueError:\n pass\n if self.PIECE_COUNT <= 0:\n # cannot do anything\n self.STATE == Spectator.State.INACTIVE\n return None\n else:\n return move", "def choose_move(self):\n return 0", "def player_move():\n\tmove = None\n\twhile move not in moves:\n\t\tmove = raw_input(\"What is your move %s? --> \" % name)\n\treturn move", "def keypress(self):\n return self._client.getUserInput()", "def secondMove(board):\r\n # Get position of first tile\r\n (y1, x1) = board.black[0]\r\n\r\n if y1 <= board.size / 2:\r\n y2 = 1\r\n else:\r\n y2 = -1\r\n\r\n if x1 <= board.size / 2:\r\n x2 = 1\r\n else:\r\n x2 = -1\r\n return (y1 + y2, x1 + x2)", "def get_key_press():\n return ord(getch.getch());", "def wait_first_move(self):\n self.env.keyboard.listen_once(self.catch_key_first, key_down)", "def get_current_move(self):\n x_count = self.game_board.count('X')\n o_count = self.game_board.count('O')\n if x_count <= o_count:\n return 'X'\n return 'O'", "def GetMove(self, board):\n move = None\n while True:\n move = input(\"Enter coordinates as XY (e.g. 21): \")\n if board[Game.GetIndexFromCoords(*move)] == \" \":\n return Game.GetIndexFromCoords(*move)\n else:\n print(\"Space occupied.\")", "def interactive_strategy(game: Any) -> Any:\r\n move = input(\"Enter a move: \")\r\n return game.str_to_move(move)", "def select_move(self, game, player) -> int:\n print()\n print(f\"Player {player} turn\")\n game.print_player_perspective(player)\n\n move = -1\n while(move==-1):\n entered_move = input (\"Enter move: \")\n\n if(int(entered_move) in game.possible_moves(player)):\n move = int(entered_move)\n else:\n print(\"Entered an invalid move\")\n\n print()\n return move", "def interactive_strategy(game: Game) -> str:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def game_input(self):\n inp = \"\"\n while inp not in [\"DOWN\", \"RIGHT\", \"UP\", \"LEFT\"]:\n inp = input(\"Use the numeric keypad to choose a direction\").upper()\n if inp == \"Q\":\n break\n inp = self.pave_num(inp)\n return inp", "def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def get_action(self, state):\n from graphics_utils import keys_waiting\n from graphics_utils import keys_pressed\n keys = keys_waiting() + keys_pressed()\n if keys != []:\n self.keys = keys\n\n legal = state.get_legal_actions(self.index)\n move = self.get_move(legal)\n\n if move == Directions.STOP:\n # Try to move in the same direction as before\n if self.last_move in legal:\n move = self.last_move\n\n if (self.STOP_KEY in self.keys) and Directions.STOP in legal:\n move = Directions.STOP\n\n if move not in legal:\n move = random.choice(legal)\n\n self.last_move = move\n return move" ]
[ "0.6976197", "0.61256886", "0.607909", "0.60758954", "0.60556537", "0.59408665", "0.59039104", "0.582013", "0.5779158", "0.57545716", "0.5722253", "0.57185733", "0.57105273", "0.56864023", "0.5655696", "0.56548995", "0.56412286", "0.5633291", "0.5616646", "0.55753934", "0.5574415", "0.55721235", "0.55687964", "0.554603", "0.5535772", "0.5520699", "0.55043864", "0.55043864", "0.55043864", "0.55011433" ]
0.6608401
1
Print the win rate of the user against the computer.
def calculateWinRate(): times = 10 winRate = 0.0 for i in range(times): game = Game('user', 6, 6) winRate += game.play(5, False, True, False, False) winRate = winRate/times print "Winrate:", winRate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_stats(self):\n print(\"Simulation took: {:.2f} seconds to execute\".format(time.time() - self.start_time))\n for i, win in enumerate(self.wins):\n average = 0\n if win:\n average = float(self.tries[i]) / win\n print(\"Player {} wins: {} with (average number of rounds: {:.2f})\".format(i+1, win, average))", "def player_win(self):\r\n\r\n self.summary = (\" \" * 83) + \"YOU WIN\"\r\n print(\"Player wins against opponent.\\n\")\r\n self.player_wins += 1", "def report_rounds(self):\n return print(f\"Total Rounds played: {sum([self.wins, self.draws, self.losses])}\")", "def show_results(self):\r\n\r\n if self.player_cards > self.computer_cards: # player wins\r\n print('\\nCongratulations!!')\r\n print('You WIN by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n elif self.player_cards < self.computer_cards: # computer wins\r\n print('\\nToo bad!!')\r\n print('You LOST by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n else: # tied\r\n print('You TIED by {0} / {1}'.format(self.player_cards, self.computer_cards))", "def display_score(self, win, player, computer):\n font = pygame.font.SysFont('comicsans', 70)\n if player < 10 and computer < 10:\n pygame.draw.rect(win, black, (150, 30, 75, 50))\n pygame.draw.rect(win, black, (295, 30, 75, 50))\n text1 = font.render(str(player), 1, white)\n text2 = font.render(str(computer), 1, white)\n win.blit(text1, (185, 35))\n win.blit(text2, (297, 35))", "def play_game():\r\n cutoff = 30 # CHANGE THIS IF YOU WANT A LONGER GAME!\r\n user_total = 0\r\n comp_total = 0\r\n\r\n print \"First to\", cutoff, \"points wins!\"\r\n print\r\n\r\n while user_total < cutoff and comp_total < cutoff:\r\n\r\n # get the user and computer words, convert to lower case\r\n user_word = raw_input(\"Your play:\").lower()\r\n comp_word = get_computer_play().lower()\r\n print \"Computer played\", comp_word\r\n\r\n # calculate user and computer scores\r\n user_score = total_points(user_word)\r\n print \"User score:\", user_score\r\n comp_score = total_points(comp_word)\r\n print \"Computer score:\", comp_score\r\n\r\n # check whether this was a trick round, and score appropriately\r\n # round winner's score is added, round loser's score is subtracted\r\n is_trick = is_trick_round(user_word, comp_word)\r\n if is_trick:\r\n print \"TRICK ROUND!\"\r\n if (is_trick and user_score < comp_score) or (not is_trick and user_score > comp_score):\r\n print \"You win!\"\r\n user_total += user_score\r\n comp_total -= comp_score\r\n elif (is_trick and user_score > comp_score) or (not is_trick and user_score < comp_score):\r\n print \"You lose!\"\r\n user_total -= user_score\r\n comp_total += comp_score\r\n else:\r\n print \"You tie!\"\r\n \r\n # display current score totals\r\n print \"Current scores:\"\r\n print \"\\tYou:\", user_total\r\n print \"\\tComputer:\", comp_total\r\n print\r\n\r\n # display overall winner\r\n print \"Game over:\",\r\n if comp_total > user_total:\r\n print \"Computer wins!\"\r\n else:\r\n print \"You win!\"", "def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie = 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / 
self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result", "def printOutcome(self):\n o = self.getOutcome()\n if o == 0:\n print(\"No winner\")\n else:\n print(\"Player\", o, \" won\")", "def win(self):\n self.score += 1\n self.ids['score'].text = 'SCORE: ' + str(self.score)", "def win_round(bank, report):\n print(f\"\\nYOU WIN THIS ROUND!!!\\n\"\n f\"You get to collect {WIN_BONUS()}x the bet you placed!\\n\"\n f\"Hope you can use this towards your student loans ;)\\n\")\n bank.collect(WIN_BONUS())\n report.record(result=\"win\")", "def display_result(self) -> None:\n winner = self.state.winner\n if winner:\n self._display_message(winner + ' wins!')\n else:\n self._display_message('Draw')\n\n self._display_message(\n f'\\n{self.state.player1} has {self.state.player1_score} wins'\n )\n self._display_message(\n f'{self.state.player2} has {self.state.player2_score} wins\\n'\n )", "def event_player_wins(self) -> None:\n win_amount = self.user.bet\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(self.user.bet)", "def print_result(winner):\r\n pass", "def disp_score():", "def score(self):\n score_message = {\n 'Onewins': \"\\nThe Winner is Player 1!\",\n 'Twowins': \"\\nThe Winner is Player 2!\",\n 'Tie': \"\\nTie! 
Looks like everyone's a winner!\",\n 'Nowinner': \"\\nYikes, neither of you win!\"\n }\n if self.pone_score > self.ptwo_score:\n print(score_message['Onewins'])\n elif self.pone_score < self.ptwo_score:\n print(score_message['Twowins'])\n elif self.pone_score == 0 and self.ptwo_score == 0:\n print(score_message['Nowinner'])\n else:\n print(score_message['Tie'])", "def Win(self):\n print ( 10*\"*\")\n print (\"Player \" + self.character + \" says:\")\n print (\"I Won\")\n print ( 10*\"*\")", "def print_stats(self):\n self.clear_top()\n font2 = pygame.font.SysFont('comicsans', 40, True) # creates new font object\n minutes, seconds = divmod(self.current_time - self.start, 60) # calculation\n minutes, seconds = round(minutes), round(seconds) # rounds numbers\n if seconds == 60:\n seconds = 0\n # Draw text onto the screen\n text = font2.render('Attempts: ' + str(self.recursions), 1, (0, 0, 0))\n if len(str(seconds)) == 1:\n seconds = '0' + str(seconds)\n text2 = font2.render(' Time: 0{}:{}'.format(minutes, seconds),\n 1, (0, 0, 0))\n self.screen.blit(text, (20, 20))\n self.screen.blit(text2, (480, 20))\n pygame.display.update((0, 0, 720, 800))", "def print_report(self):\n print '=' * 20 + ' %s ' % self.label + '=' * 20\n print '%-20s%5s\\t%4s\\t%4s\\t%4s\\t%4s' % (\n 'Hand' + '=' * 16, '#', 'Frac', 'W', 'Tie', 'L')\n for hand, result_dict in self.counts.iteritems():\n total_for_hand = sum(result_dict.itervalues())\n if total_for_hand == 0:\n win_frac = 0.0\n tie_frac = 0.0\n loss_frac = 0.0\n else:\n win_frac = float(result_dict[WIN_RESULT])/total_for_hand\n tie_frac = float(result_dict[TIE_RESULT])/total_for_hand\n loss_frac = float(\n result_dict[LOSS_RESULT])/total_for_hand\n print '%-20s%5d\\t%0.3f\\t%0.3f\\t%0.3f\\t%0.3f' % (\n hand, total_for_hand, float(total_for_hand)/self.total_items,\n win_frac, tie_frac, loss_frac)", "def print_winner(self):\n if self.winner is None:\n print('There was no winner')\n else:\n print('The winner was {}!'.format(\n self.__class__.COUNTER_REPRESENTATION[self.winner]))", "def print_statistics(self):\n print 'Ran %s iterations in %0.3f seconds\\n' % (\n self.iterations, self.elapsed_time)\n\n print 'Overall Equity'\n for index in range(len(self.holdem_ranges)):\n range_short_form = '%r' % self.holdem_ranges[index]\n print 'P%s) %-15s %0.3f' % (\n index,\n range_short_form,\n float(self.win_stats.get(index, 0))/self.iterations)\n print '\\n'\n print 'Hand distribution for each player'\n for stats in self.player_stats:\n stats.print_report()", "def RPSf():\n rps_dict = {'r': 0,\n 'p': 1,\n 's': 2}\n rps_list = ['Rock', 'Paper', 'Scissors']\n count = [0, 0, 0] # index 0 is for our score, 1 for bot, 2 for draw.\n while True:\n computer = randint(0, 2)\n print(f\"\\nYour score: {count[0]}. Bot score: {count[1]} . Draw: {count[2]}\") # Prints the score\n user = input(\"Rock, Paper, Scissors, Go!: \").lower()\n if user not in 'rps' and user not in rps_list:\n if QuBa(user):\n return\n else:\n print(\"That is not a valid choice. 
Please try again: \")\n continue\n print(f\"The bot chooses {rps_list[computer]}!\")\n if (rps_dict[user] + 1) == computer or (computer == 0 and rps_dict[user] == 2):\n print('You lose!')\n count[1] += 1\n elif rps_dict[user] == computer:\n print('Draw')\n count[2] += 1\n else:\n print('You win!')\n count[0] += 1", "def score(wins, tries):\n return str(wins) + \" / \" + str(tries)", "def score(wins, tries):\n return str(wins) + \" / \" + str(tries)", "def print_result(winner):\n pass", "def print_player_rank_and_points(self):\r\n pass", "def print_current_scores(self, round_num, index):\n print(f'\\n{self._players_list[index].name.upper()} '\n f'YOUR TURN. ROUND: {round_num + 1}')\n\n print('-'*21)\n print('ROLL SCORES'.rjust(16))\n self._players_list[index].print_stacked_score_dict()\n\n print('-'*21)\n print('TOP SCORE BONUS'.rjust(19))\n print(f\"Top Score:\".ljust(16) +\n f\"{self._players_list[index].get_top_score()}\".rjust(3))\n print(f\"Top Bonus Score:\".ljust(16) +\n f\"{self._players_list[index].get_top_bonus_score()}\".rjust(3))\n\n print('-'*21)\n print('TOTAL SCORES'.rjust(19))\n print(f\"Total Top:\".ljust(16) +\n f\"{self._players_list[index].get_total_top_score()}\".rjust(3))\n print(f\"Total Bottom:\".ljust(16) +\n f\"{self._players_list[index].get_total_bottom_score()}\".rjust(3))\n\n print('-'*21)\n print(f\"GRAND TOTAL:\".ljust(16) +\n f\"{self._players_list[index].get_grand_total_score()}\".rjust(3))", "def opponents_score(self):\n if self.opponent_wickets == 10:\n var1 = \"All Out\"\n return str('{0} {1}').format(self.opponent_runs, var1)\n else:\n var1 = self.opponent_wickets\n return str('{0}-{1}').format(self.opponent_runs, var1)", "def game_summary(score_sheet1, score_sheet2):\n total_score1 = calculate_total(score_sheet1)\n total_score2 = calculate_total(score_sheet2)\n print(f'--------------------------------------------------\\nGame over! {score_sheet1[0]} has the total score of '\n f'{total_score1}, {score_sheet2[0]} has the total score of {total_score2}, ', end='')\n if total_score1 > total_score2:\n print('player1 win!')\n elif total_score2 > total_score1:\n print('player2 win!')\n elif total_score2 == total_score1:\n print(\"It's a draw!\")", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def display_results(self):\n print(f'AI player results - Wins: {self._wins}, Draws: {self._draws}, Loss: {self._losses}')\n print(f'Total win percentage {(self._wins / self._count) * 100}', end='\\n\\n')\n\n with open(os.path.join(os.path.dirname(__file__), 'ai-results.json'), 'w') as f:\n output_dict = {str(k): v for k, v in self._state_values.items()}\n json.dump(output_dict, f, indent=4, sort_keys=True)" ]
[ "0.73558235", "0.6755521", "0.6746405", "0.66018116", "0.65230495", "0.641469", "0.6412145", "0.6407791", "0.6297011", "0.6257574", "0.62541705", "0.6229966", "0.62088585", "0.61787057", "0.61663157", "0.61631507", "0.6109566", "0.6108312", "0.6083345", "0.6081153", "0.6047822", "0.604235", "0.604235", "0.6022457", "0.6016492", "0.60135317", "0.60119426", "0.5991665", "0.59619635", "0.59561914" ]
0.69364434
1
Recupera un empleado por su id
def get(self, id):
        resultado = EmployeeModel.query.filter_by(employee_id=id).first()
        if resultado:
            return resultado
        api.abort(404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def get_employee_by_id(self, employee_id):\n employee = self.admin_repository.get_employee_by_id(employee_id)\n if employee:\n print('''Name: {}\\nEmail: {}\\n\n '''.format(employee[0], employee[1]))\n return employee\n else:\n print(\"Invalid Id\")\n return False", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def _getCadastroEmpregos(self, id_cadastro):\n return self.execSql(\"select_cadastro_empregos\",\n id_cadastro=int(id_cadastro))", "def __init__(self, nombre_depto, id_depto):\n self.nombre_depto = nombre_depto\n self.id_depto = id_depto\n self.empleados = []", "def anyadir_empleado(self, empleado):\n self.empleados.append(empleado)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def read():\n\tid_buscar = int(input(\"Ingrese ID de pokemon: \"))\n\texistencia = \"\"\"\n\t\t\t\tSELECT * FROM sansanito\n\t\t\t\tWHERE id = :1\"\"\"\n\tcur.execute(existencia, [id_buscar])\n\tres = cur.fetchall()\n\t# Res vacio implica que no existe registro con ID ingresado\n\tif res == []:\n\t\tprint(\"ID no encontrado en la tabla!\")\n\t\treturn\n\telse:\n\t\tprint_table(hdrs_sansanito, True, res)", "def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)", "def get(self,request,*args,**kwargs):\n\n\t\tsucursal = Sucursal.objects.get(id=kwargs['spk'])\n\n\t\tuser_form = UserForm()\n\t\templeado_form = EmpleadoForm( initial={'sucursal':sucursal.id} )\n\n\t\tforms = [user_form,empleado_form]\n\t\tcontext = {\n\t\t'section_title':'Nuevo Empleado',\n\t\t'button_text':'Crear',\n\t\t'sucursal':sucursal,\n\t\t'user_form':user_form,\n\t\t'empleado_form':empleado_form 
}\n\n\t\treturn render_to_response(\n\t\t\t'empleado/empleado_form.html',\n\t\t\tcontext,\n\t\t\tcontext_instance=RequestContext(request))", "def deletar_empresa(id):\n empresa = Empresa.query.get_or_404(id)\n \n db.session.delete(empresa)\n db.session.commit()\n flash('Empresa deletada com sucesso.')\n\n return redirect(url_for('home.listar_empresas'))", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def carregarEndereco(self, ide):\r\n try:\r\n self.__id = int(ide)\r\n self.cursor.execute(\"SELECT * FROM ENDERECO WHERE ID = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None\r\n except:\r\n return None", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def editar_empresa(id):\n cadastrando_empresa = False\n\n empresa = Empresa.query.get_or_404(id)\n form = EditarEmpresaForm(obj=empresa)\n\n if form.validate_on_submit():\n empresa.nome = form.nome.data\n empresa.simbolo = form.simbolo.data\n empresa.regiao = form.regiao.data\n empresa.tipo = form.tipo.data\n empresa.abertura = form.abertura.data\n empresa.fechamento = form.fechamento.data\n empresa.zona = form.zona.data\n empresa.moeda = form.moeda.data\n db.session.commit()\n flash('Empresa editada com sucesso!')\n\n return redirect(url_for('home.listar_empresas'))\n\n form.nome.data = empresa.nome\n form.simbolo.data = empresa.abertura \n form.regiao.data = empresa.regiao\n form.tipo.data = empresa.tipo\n form.abertura = empresa.abertura\n form.fechamento = empresa.fechamento\n form.zona.data = empresa.zona\n form.moeda.data = empresa.moeda\n\n\n return render_template('home/empresa.html', action=\"Edit\",\n cadastrando_empresa=cadastrando_empresa, form=form,\n empresa=empresa, title=\"Editar empresa\")", "def carregarProfessor(self, ide):\r\n try:\r\n self.__id = int(ide)\r\n self.cursor.execute(\"SELECT * FROM PROFESSOR WHERE ID = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None\r\n except:\r\n return None", "def get_examen(self, id_examen):\n\n self.logger.info(\"\\t[+] get_examen [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n try:\n return self.examens.select().where(self.examens.columns.id_examen == id_examen).execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")", "def jugador_editar(id):\n\tjugador = Jugador.query.get_or_404(id)\n\tdata = json.loads(request.get_data())\n\tif(data['nombre'] == '' or data['apellido'] == ''):\n\t\treturn bad_request('Registrar nombre y apellido')\n\tif(data['activo'] == '1'):\n\t\tjugador.activo = True\n\telse:\n\t\tjugador.activo = False\n\t\n\tjugador.nombre = data['nombre']\n\tjugador.apellido = data['apellido']\n\tjugador.dinero = data['dinero']\n\tdb.session.add(jugador)\n\tdb.session.commit()\n\n\treturn jsonify(jugador.to_dict())", "def get(self):\n 
resultado = EmployeeModel.query.all()\n return resultado", "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def post(self):\r\n piso=self.request.get('piso')\r\n numext=self.request.get('numext')\r\n numint=self.request.get('numint')\r\n piso=self.validonumero(piso)\r\n numext=self.validonumero(numext)\r\n numint=self.validonumero(numint)\r\n \r\n empresa=empresas()\r\n empresa.nombre=self.request.get('desc')\r\n empresa.calle=self.request.get('calle')\r\n empresa.numeroExterior=int(numext)\r\n empresa.numeroInterior=int(numint)\r\n empresa.colonia=self.request.get('colonia')\r\n empresa.piso=int(piso)\r\n empresa.andador=self.request.get('andador')\r\n empresa.codigo_postal=int(self.request.get('cp'))\r\n empresa.sitioweb=self.request.get('web')\r\n empresa.correo=self.request.get('mail')\r\n empresa.nombreContacto=\"\"\r\n empresa.paternoContacto=\"\"\r\n empresa.maternoContacto=\"\"\r\n #### \r\n ciudad=self.request.get('ciudad')\r\n query=\"where ciudad='%s'\"%ciudad\r\n cd=ciudades.gql(query)\r\n city=cd.fetch(1)\r\n for lstcd in city:\r\n empresa.id_Ciudad=lstcd.key().id()\r\n empresa.put()\r\n jsondic={}\r\n jsondata=[]\r\n jsondata+=[self.addKey(jsondic,\"Dato\", empresa.key().id())]\r\n self.response.out.write(simplejson.dumps(jsondata))\r\n return False", "def get(self, _id):", "def employee_id(self):\n for i in self.emp_dict:\n self.emp_id[i] = self.emp_dict[i][0]\n #print(self.emp_id)\n return self.emp_id", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def get_object(id):", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def get(self, id):\n tmp = userDao.get_one_entry(id)\n return tmp" ]
[ "0.6796346", "0.6473423", "0.6111904", "0.61043555", "0.6066562", "0.59767336", "0.59119356", "0.58309406", "0.5822549", "0.5791337", "0.57300967", "0.5717637", "0.57097596", "0.56613207", "0.5634401", "0.5623324", "0.5607379", "0.55943084", "0.5550843", "0.5541731", "0.5540049", "0.55154824", "0.5511026", "0.5495446", "0.5481149", "0.545855", "0.5452154", "0.54467785", "0.5445394", "0.54418564" ]
0.6809357
0
Elimina un empleado por su id
def delete(self, id):
        empleadoeliminar = EmployeeModel.query.filter_by(employee_id=id).first()
        if empleadoeliminar:
            db.session.delete(empleadoeliminar)
            db.session.commit()
            return 201
        api.abort(404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deletar_empresa(id):\n empresa = Empresa.query.get_or_404(id)\n \n db.session.delete(empresa)\n db.session.commit()\n flash('Empresa deletada com sucesso.')\n\n return redirect(url_for('home.listar_empresas'))", "def remove_employee(self, id):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('DELETE FROM employee WHERE employeeID=%s', (id,))\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to remove Employee!\\n(%s)' % (error))", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def delete(id_: int):\n logger.debug('Deleting employee with id %i.', id_)\n try:\n delete_employee = Employee.query.get(id_)\n db.session.delete(delete_employee)\n except Exception as exception:\n logger.error('An error occurred while deleting employee with id %i. '\n 'Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully deleted employee with id %i.', id_)", "def delete(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n employee.delete()\n return Response(\n data=' Entry deleted',\n status=status.HTTP_400_BAD_REQUEST\n )", "def cmd_delete_employee():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_employee_by_id(id)\r\n User.query.filter(User.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Employee '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.employees'))\r\n else:\r\n flash(f\"Employee '{id}' was not found\")\r\n return redirect(url_for('main.employees'))", "def delete(self, id):\n r = validate_get(id)\n tareaID = r.tarea.id\n r.destroySelf()\n flash(_(u'El %s fue eliminado permanentemente.') % name)\n raise redirect('../list/%d' % tareaID)", "def delete(self, _id):", "def remove(self,producto):\n id_producto = str(producto.id)\n if id_producto in self.carro:\n del self.carro[id_producto]\n self.save()", "def remove(table, id_):\n\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table", "def post(self, request,post_id):\n form = DelEventoForm(request.POST)\n if form.is_valid():\n try:\n u = Evento.objects.get(id = form.cleaned_data['id'])\n correo = request.POST.get('correo', '')\n\n v = RegEvento.objects.all()\n \n\n if(u.correo == correo):\n for i in v:\n if(i.id_Evento == u.id):\n print(\"eliminado\")\n print(i.id_Evento)\n print(\"aoeu\")\n print(u.id)\n send_mail(\n 'Cancelacion Evento',\n 'Da click para confirmar tu registro',\n '[email protected]',\n [i.email_Usuario],\n fail_silently=False,\n )\n i.delete()\n u.delete()\n messages.info(request, 'Evento eliminado')\n\n else:\n messages.info(request, 'No puedes eliminar este evento, porque no es tuyo(valores incorrectos)')\n \n except:\n messages.info(request, 'El evento no existe, cambiaste valores')\n\n return redirect(\"Eventos:listaEventos\")\n #return render(request, self.template, self.context)", "def delete(self, table, element):\n\n conditions = [\"id\"]\n values = element[\"id\"]\n result = self.__delete(table, conditions, [values])\n return result", "def delete(self, cls, id):\n pass", "def 
eliminar(idComentario, usuario, actividad):\n comentario = Comentario.objects.filter(idcomentario = idComentario)\n comentario.delete()\n\n # actividad = Actividad.objects.get(idact=comentario.idactcomentario)\n Accion.objects.crearAccion(\n usuario,\n \"El usuario %s elimino un comentario en la actividad %s\" % (usuario.username, actividad.nombreact),\n 'i')", "def remove(table, id_):\n\n # your code\n\n key = common.check_for_key(id_,table)\n\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n table.pop(key)\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n #print(table)\n return table", "def eliminar(self, producto):\n\n producto_id = str(producto)\n\n if producto_id in self.carro:\n del self.carro[producto_id]\n self.guardar()", "def delete(self, id):\n raise NotImplementedError", "def remove_employee(self, employee):\n self.employees.remove(employee)", "def delete_employee():\n employee_Id_list = db.get_employee_Id_list()\n print(\"The current employee list is \" , employee_Id_list)\n while True:\n delete_id = get_user_string(\"Enter the employee id to be delete\")\n if int(delete_id) in employee_Id_list:\n employee_to_be_deleted = db.get_employee(delete_id)\n db.delete_employee(delete_id)\n print(\"Employee \" + employee_to_be_deleted.full_name + \" has been delete from employee\")\n break\n else:\n print(\"No Id found\")\n continue", "def excluir_usuario():\n try:\n if current_user.is_administrator():\n cod_id = request.args.get('id')\n usuario = Usuario.query.filter_by(cod_usuario = cod_id).one()\n db.session.delete(usuario)\n db.session.commit()\n return listar_usuarios()\n return redirect(url_for('main.index'))\n except Exception as e:\n abort(500, e)", "def remove(table, id_):\n count=0\n searched_index=-1\n in_it=False\n for i in table:\n if i[0]==id_:\n searched_index=count\n in_it=True\n count+=1\n\n if in_it: \n table.pop(searched_index)\n else:\n ui.print_error_message(\"ID not found\")\n \n return table", "def delete(self,id):\r\n return delete(id=id)", "def remove(table, id_):\n\n common.toremoveid(\"inventory/inventory.csv\",data_manager.get_table_from_file(\"inventory/inventory.csv\"),id_)", "def delete(aidi, flag=True):\n\texistencia = \"\"\"\n\t\t\t\tSELECT nombre FROM sansanito\n\t\t\t\tWHERE id = :1\"\"\"\n\tdel_query = \"\"\"\n\t\t\t\tDELETE FROM sansanito\n\t\t\t\tWHERE id = :1\"\"\"\n\tcur.execute(existencia, [aidi])\n\tres = cur.fetchall()\n\t# Hay un registro con ID - y como es PK, es unico.\n\tif res != []:\n\t\tcur.execute(del_query, [aidi])\n\t\tif flag:\n\t\t\tprint(\"Registro con ID\", aidi, \"borrado exitosamente.\")\n\telse:\n\t\tprint(\"ID no encontrado en la tabla!\")\n\t\tprint(\"Devolviendo al menu principal...\")", "def emeventdelete(request):\n if(request.GET):\n eid=request.GET.get(\"id\")\n s=\"delete from tbleventprograms where pId='\"+str(eid)+\"'\"\n try:\n c.execute(s)\n db.commit()\n except:\n pass\n else:\n return HttpResponseRedirect(\"/emevent\")\n return render(request,\"emevent.html\")", "def destroy(self, request, pk=None):\n try:\n deleted_team = self.controller.delete_employee(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n except domain_exceptions.ObjectEntityDoesNotExist as e:\n return Response(e.message, status=status.HTTP_404_NOT_FOUND)", "def delete_plante(id):\n plante = get_plante(id)\n nom=plante.get_name()\n db.session.delete(plante)\n get_parterre(plante.get_parterre()).delete_plante(plante)\n p = Actions(\n contenu = \"Suppression de la plante \"+nom + \" au parterre \"+ 
get_parterre(plante.get_parterre()).get_name(),\n liste = 1\n )\n db.session.add(p)\n db.session.commit()\n return redirect(url_for(\"parterre\"))", "def remove(id):\n q = User.delete().where(User.id == id)\n try:\n q.execute()\n except Exception as e:\n return e\n return redirect(url_for('db'))", "def delete_item_by_id(self, id):\n response = self.table_connector.delete_item(Key={self.primary_key: id})\n print(response)", "def destroy(self, request, pk=None):\n try:\n deleted_team_employee = self.controller.remove_team_employee(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n except (domain_exceptions.ObjectEntityDoesNotExist,\n domain_exceptions.EmployeeHasOneTeam\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)" ]
[ "0.7282368", "0.7227127", "0.71270996", "0.6703881", "0.66633046", "0.664032", "0.6582684", "0.6498842", "0.6348776", "0.6261464", "0.6236101", "0.62215394", "0.61652684", "0.6162873", "0.61538523", "0.61431855", "0.61428344", "0.6076793", "0.6056831", "0.6049542", "0.60467", "0.60339445", "0.6033722", "0.6027399", "0.6027291", "0.60045797", "0.6003166", "0.59845483", "0.597826", "0.596296" ]
0.75576895
0
Actualiza los datos de un empleado por su id
def put(self, id):
        empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()
        if empleadoactualizar:
            reg = api.payload
            empleadoactualizar.employee_id = reg['employee_id']
            empleadoactualizar.name = reg['name']
            empleadoactualizar.age = reg['age']
            empleadoactualizar.position = reg['position']
            empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])
            db.session.merge(empleadoactualizar)
            db.session.commit()
            return 201
        api.abort(404)
[ "def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def editar_empresa(id):\n cadastrando_empresa = False\n\n empresa = Empresa.query.get_or_404(id)\n form = EditarEmpresaForm(obj=empresa)\n\n if form.validate_on_submit():\n empresa.nome = form.nome.data\n empresa.simbolo = form.simbolo.data\n empresa.regiao = form.regiao.data\n empresa.tipo = form.tipo.data\n empresa.abertura = form.abertura.data\n empresa.fechamento = form.fechamento.data\n empresa.zona = form.zona.data\n empresa.moeda = form.moeda.data\n db.session.commit()\n flash('Empresa editada com sucesso!')\n\n return redirect(url_for('home.listar_empresas'))\n\n form.nome.data = empresa.nome\n form.simbolo.data = empresa.abertura \n form.regiao.data = empresa.regiao\n form.tipo.data = empresa.tipo\n form.abertura = empresa.abertura\n form.fechamento = empresa.fechamento\n form.zona.data = empresa.zona\n form.moeda.data = empresa.moeda\n\n\n return render_template('home/empresa.html', action=\"Edit\",\n cadastrando_empresa=cadastrando_empresa, form=form,\n empresa=empresa, title=\"Editar empresa\")", "def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )", "def do_update_data(self, *args):\n print(\"Provide data to update :\")\n id_field = dict()\n id_field['id'] = input(\"Provide id to update :\")\n values = {**id_field, **self.__class__.populate_data()}\n self.connection_obj.update_into_table(**values)\n print(\"Data Update Successful\")", "def post(self, request):\n\n try:\n eventoid = request.POST.get('id', '')\n etiquetas = request.POST.get('etiquetas', '')\n u = Evento.objects.get(id=eventoid)\n\n u.etiquetas = etiquetas\n u.save()\n print(\"Exito en la actualizacion de etiquetas\")\n except:\n print(\"Error en la actualizacion\")\n\n \n return redirect(\"Eventos:listaEventos\")\n #return render(request, self.template, self.context)", "def alterar_cliente(self, ID, nome, sobrenome, tel_list, email_list, empresa):\r\n if nome != '':\r\n print(f'Alterando nome para {nome}')\r\n self.clientes[ID].nome = nome.title()\r\n elif sobrenome != '':\r\n print(f'Alterando sobrenome para {sobrenome}')\r\n self.clientes[ID].sobrenome = sobrenome.title()\r\n elif len(tel_list) > 0:\r\n print(f'Alterando telefones para {tel_list}')\r\n self.clientes[ID].tel_list = tel_list\r\n elif len(email_list) > 0:\r\n print(f'Alterando email para {email_list}')\r\n self.clientes[ID].email_list = email_list\r\n elif empresa != '':\r\n print(f'Alterando empresa para {empresa}')\r\n self.clientes[ID].empresa = empresa.title()", "def update(table, id_):\n\n # your code\n key = common.check_for_key(id_,table)\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n return_inputs = ui.get_inputs(['Name', 'Age'], 
'Enter New Values')\n modif_index = key\n\n table[modif_index][NAME] = return_inputs[FIRST_PROP]\n table[modif_index][AGE] = return_inputs[SECOND_PROP]\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n return table", "def update(self):\n if not self.id:\n raise DataValidationError(\"Update called with empty ID field\")\n db.session.commit()\n db.session.refresh(self)", "def update(self, id, **kw):\n orden = kw['orden']\n del kw['orden']\n del kw['tareaID']\n if kw['los_archivos_entrada'].filename:\n kw['archivos_entrada'] = kw['los_archivos_entrada'].file.read()\n del kw['los_archivos_entrada']\n if kw['los_archivos_a_comparar'].filename:\n kw['archivos_a_comparar'] = kw['los_archivos_a_comparar'].file.read()\n del kw['los_archivos_a_comparar']\n # TODO : Hacer ventanita mas amigable para cargar esto.\n try:\n kw['archivos_a_guardar'] = tuple(kw['archivos_guardar'].split(','))\n except AttributeError:\n pass\n del kw['archivos_guardar']\n r = validate_set(id, kw)\n flash(_(u'El %s fue actualizado.') % name)\n raise redirect('../list/%d' % r.tarea.id)", "def post(self):\r\n piso=self.request.get('piso')\r\n numext=self.request.get('numext')\r\n numint=self.request.get('numint')\r\n piso=self.validonumero(piso)\r\n numext=self.validonumero(numext)\r\n numint=self.validonumero(numint)\r\n \r\n empresa=empresas()\r\n empresa.nombre=self.request.get('desc')\r\n empresa.calle=self.request.get('calle')\r\n empresa.numeroExterior=int(numext)\r\n empresa.numeroInterior=int(numint)\r\n empresa.colonia=self.request.get('colonia')\r\n empresa.piso=int(piso)\r\n empresa.andador=self.request.get('andador')\r\n empresa.codigo_postal=int(self.request.get('cp'))\r\n empresa.sitioweb=self.request.get('web')\r\n empresa.correo=self.request.get('mail')\r\n empresa.nombreContacto=\"\"\r\n empresa.paternoContacto=\"\"\r\n empresa.maternoContacto=\"\"\r\n #### \r\n ciudad=self.request.get('ciudad')\r\n query=\"where ciudad='%s'\"%ciudad\r\n cd=ciudades.gql(query)\r\n city=cd.fetch(1)\r\n for lstcd in city:\r\n empresa.id_Ciudad=lstcd.key().id()\r\n empresa.put()\r\n jsondic={}\r\n jsondata=[]\r\n jsondata+=[self.addKey(jsondic,\"Dato\", empresa.key().id())]\r\n self.response.out.write(simplejson.dumps(jsondata))\r\n return False", "def jugador_editar(id):\n\tjugador = Jugador.query.get_or_404(id)\n\tdata = json.loads(request.get_data())\n\tif(data['nombre'] == '' or data['apellido'] == ''):\n\t\treturn bad_request('Registrar nombre y apellido')\n\tif(data['activo'] == '1'):\n\t\tjugador.activo = True\n\telse:\n\t\tjugador.activo = False\n\t\n\tjugador.nombre = data['nombre']\n\tjugador.apellido = data['apellido']\n\tjugador.dinero = data['dinero']\n\tdb.session.add(jugador)\n\tdb.session.commit()\n\n\treturn jsonify(jugador.to_dict())", "def change_employee(self, employee):\n cursor = self.dbconnect.get_cursor()\n try:\n if employee.id == None:\n raise Exception('no id given')\n cursor.execute('select * from employee where employeeID=%s', (str(employee.id),))\n if cursor.rowcount == 0:\n raise Exception('no employee found with that id')\n cursor.execute(\n 'update employee set name= %s,email= %s,office= %s,title= %s,INTernORextern= %s,active= %s,promotor= %s where employeeID=%s',\n (employee.name, employee.email, employee.office, employee.title,\n employee.internOrExtern, employee.active, employee.promotor, employee.id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise Exception('unable to change employee')", "def edit_employee(employee_id):\n\n if not g.user:\n 
flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)", "def update(table, id_):\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n titles_sales = [\"Name: \", \"Birth Year: \"]\n inputs = ui.get_inputs(titles_sales, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def update_by_id(cls, id, name, surname):\n\t\tauthor = Author.query.get(id)\n\t\tauthor.name = name\n\t\tauthor.surname = surname\n\t\tdb.session.commit()", "def save(self):\n if self.id:\n self.update()\n else:\n self.create()", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "def update_employee(emp_id, key=None, value=None, items=None):\n if items is None:\n if key is None or value is None:\n return {\"Error\": \"At least one key/value pair is required\"}\n items = {key: value}\n elif isinstance(items, str):\n items = salt.utils.yaml.safe_load(items)\n\n xml_items = \"\"\n for pair in items:\n xml_items += '<field id=\"{}\">{}</field>'.format(pair, items[pair])\n xml_items = \"<employee>{}</employee>\".format(xml_items)\n\n status, result = _query(\n action=\"employees\",\n command=emp_id,\n data=xml_items,\n method=\"POST\",\n )\n\n return show_employee(emp_id, \",\".join(items.keys()))", "def editarData(self, ide, ano, mes, dia, hora = None):\r\n try:\r\n if ide == None:\r\n return False\r\n else:\r\n self.__temp = str(ano)+'-'+str(mes)+'-'+str(dia)\r\n self.cursor.execute(\"UPDATE DATAS SET DAT = '%s' WHERE ID = %s;\" %(self.__temp, ide))\r\n if hora != None:\r\n self.cursor.execute(\"UPDATE DATAS SET HORA = '%s' WHERE ID = %s;\" %(hora, ide))\r\n if hora == None:\r\n self.cursor.execute(\"UPDATE DATAS SET HORA = NULL WHERE ID = %s;\" %(ide))\r\n return True\r\n except:\r\n return False", "def anyadir_empleado(self, empleado):\n self.empleados.append(empleado)", "def realizar_entrega(self, id_alumno, comentarios, archivos):\r\n if Entrega.objects.filter(alumno_id=id_alumno, actvidad_id=self.pk).count() == 0:\r\n self._registrar_entrega(id_alumno, 
comentarios, archivos)\r\n else:\r\n self._actualizar_entrega(id_alumno, comentarios, archivos)", "def establecerSeccion(self, cuentas, unaSeccion):\n for unaCuenta in cuentas :\n unaCuenta.seccion = unaSeccion\n self.almacen.commit()", "def update(table, id_):\n\n # 4\n for index in range(len(table)):\n if table[index][0] == id_:\n addnew = ui.get_inputs(\n ['name: ', 'birth_year: '],\n 'Updating list of hr')\n addnew.insert(0, id_)\n table[index] = addnew\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table", "def id_estagio(self, id_estagio):\n self._id_estagio = id_estagio", "def actualizar(self, producto, cantidad):\n producto_id = str(producto)\n\n if producto_id in self.carro:\n self.carro[producto_id]['cantidad'] = cantidad\n \n self.guardar()", "def update_estado_entidad(self, entidad_id, estado):\n proy = self.get_by_id(entidad_id)\n proy._estado = estado \n self.update(proy)", "def actualizar_valores(self,valores): \n self.__valores.update(valores)\n self.set_valores(self.__valores)", "def edit_register(id):\n add_employee = False\n\n employee = Employee.query.get_or_404(id) #from table\n print('----update 1----')\n form = UpdateForm(obj=employee) #if not 404\n print('----update 2----')\n if form.validate_on_submit():\n employee.email = email=form.email.data\n employee.username=form.username.data\n employee.glad_id=form.glad_id.data\n employee.tel_no=form.tel_no.data\n employee.role_id=form.role_id.data\n employee.password=form.password.data\n\n # UPDATE employee to the database\n print('----update----',employee.role_id)\n db.session.commit()\n flash('You have successfully updated! ')\n\n # # redirect to the login page\n # return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Update')", "def save(self)->None:\n item = database.cursor.fetchone()\n if item:\n self.id = item['id']\n database.connection.commit()", "def update(table, id_):\n id_storage = common.get_values_from_column(table, 0)\n if id_ in id_storage:\n table = manage_data_from_user(table, id_storage, id_, True)\n # Here u can make changes:\n\n else:\n ui.print_error_message('This option does not exist.')\n\n return table" ]
[ "0.6351262", "0.6216321", "0.60944134", "0.60876113", "0.60243225", "0.59532285", "0.5936486", "0.59092456", "0.5875384", "0.5838871", "0.5790332", "0.5743715", "0.5691845", "0.5663868", "0.56201196", "0.56019706", "0.5600619", "0.5597508", "0.5596143", "0.55812967", "0.557982", "0.5578758", "0.55715984", "0.55448174", "0.5532151", "0.5521813", "0.55198354", "0.55146563", "0.54876626", "0.54804784" ]
0.7382169
0
This endpoint allows clients to GET the min sensor reading for a device.
def request_device_readings_min(device_uuid):
    # Set the db that we want and open the connection
    if app.config['TESTING']:
        conn = sqlite3.connect('test_database.db')
    else:
        conn = sqlite3.connect('database.db')
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    # Execute the query
    cur.execute('select min(value) from readings where device_uuid="{}"'.format(device_uuid))
    rows = cur.fetchall()
    # Return the JSON
    return jsonify([dict(zip(['value'], row)) for row in rows]), 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_device_readings_min(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT *, MIN(r.value) from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n # Return the JSON\n return jsonify(dict(zip(['device_uuid', 'type', 'value', 'date_created'], row))), 200", "def test_device_readings_min(self):\n request = self.client().get('/devices/{}/readings/min/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 22)", "def get_sensor(userid, deviceid, sensorid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors/{}\".format(sensorid))\n return make_response(sensor_response.content, sensor_response.status_code)", "def get_sensors(userid, deviceid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors\", json=request.json)\n return make_response(sensor_response.content, sensor_response.status_code)", "def request_device_readings_mode(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n sql += 'GROUP BY r.value ORDER BY COUNT(*) DESC LIMIT 1'\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n return str(row[0]), 200", "def read_sensor_raw(self):\n return self.read_sensor()", "def read_sensor_raw(self):\n return self.read_sensor()", "def sensor(self , sensor_index):\n sensor = obd_sensors.SENSORS[sensor_index]\n try:\n r = self.get_sensor_value(sensor)\n except \"NORESPONSE\":\n r = \"NORESPONSE\"\n return (sensor.name,r, sensor.unit)", "def get(self, request, pk):\n sensor_obj = get_object_or_404(SensorData, id=pk)\n serializer = SensorDataSerializer(sensor_obj)\n return Response(serializer.data, 200)", "def test_device_readings_mean(self):\n request = self.client().get('/devices/{}/readings/mean/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 61)", "def request_device_readings_max(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select MAX(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def min_temperature(self):\n mini, maxi = ct.c_int(), ct.c_int()\n self.lib.GetTemperatureRange(ct.pointer(mini), ct.pointer(maxi))\n return mini.value", "def request_device_readings_mean(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT AVG(r.value) from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n return str(row[0]), 200", "def GET_sensors(self):\n self.sensors.GetAll()\n self.sensors.sensors['charging-state'] = \\\n pyrobot.CHARGING_STATES[self.sensors.sensors['charging-state']]\n print simplejson.dumps(self.sensors.sensors)", "def request_device_readings_mean(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select AVG(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def get_sensor(name):\n name = _lookup(name)\n all_data = mc.get('sensor_values')\n try:\n return all_data[name]\n except KeyError:\n raise KeyError(\"No Sensor with that name\")", "def get_kwh_reading(self):\n\n svc = \"urn:micasaverde-com:serviceId:EnergyMetering1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"KWHReading\")", "def request_device_readings_max(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT *, MAX(r.value) from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n # Return the JSON\n return jsonify(dict(zip(['device_uuid', 'type', 'value', 'date_created'], row))), 200", "def test_device_readings_get_temperature(self):\n request = self.client().get('/devices/{}/readings/?type=temperature'.format(self.device_uuid))\n\n self.assertEqual(len(request.json), 3)", "def get_sensor(self, sensor_id):\n return self.sensors.get(sensor_id)", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n readings = MeterReading.objects.filter(\n unit=unit_id,\n date__gte=start_date,\n date__lte=end_date,\n reading_type='ELECTRICITY'\n )\n serializer = MeterReadingSerializer(readings, many=True)\n return Response(serializer.data)", "def low_temperature(self):\r\n try:\r\n return str(self.connect()['main']['temp_min'])\r\n except:\r\n return '@weather_low_temperature'", "def request_device_readings_median(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n sql += 'ORDER BY r.value'\n\n # Execute the query\n cur.execute(sql, params)\n rows = [row[0] for row in cur.fetchall()]\n\n if len(rows) == 0:\n return 'No results found', 200\n\n return str(median(rows)), 200", "def get_sensor_consumption(self, service_location_id, sensor_id, start, end, aggregation):\n url = urljoin(URLS['servicelocation'], str(service_location_id), \"sensor\", str(sensor_id), \"consumption\")\n return self._get_consumption(url=url, start=start, end=end, aggregation=aggregation)", "def get_sensor(self, sensor_id):\n return self.sensors[sensor_id]", "def temperature_sensor():\n\n\tsensor_name = \"humiture\"\n\treg_addr = 26\n\tdata_len = 4\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\t# get sensor data\n\tdata = rospy.wait_for_message('MediumSize/SensorHub/Temperature', Temperature, 2)\n\ttemperature = data.temperature\n\n\tdelete_sensor(sensor_name)\n\treturn temperature", "def get(self):\n try:\n log.debug(\"Device info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\":\"SELECT DISTINCT(deviceId) FROM(SELECT deviceId,q1 FROM \\\"ttd_devices\\\" ) \" }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n result_d.append(element[1])\n result={}\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching device list')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while fetching the device data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching devie data'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def get_data(self):\n return read_sensor(bus=self.bus,\n address=self.address)", "def get_temperature(self, sensor: int = 0) -> float:\n\n return self.send(self.cmd.GET_HEATING_ACT)", "def min_temp(self) -> float | None:\n try:\n return self._device.config[\"min_temp\"]\n except TypeError: # 'NoneType' object is not subscriptable\n return" ]
[ "0.73609155", "0.7277634", "0.6801135", "0.6467421", "0.62297684", "0.6126676", "0.6126676", "0.60351396", "0.5986735", "0.59371185", "0.59295577", "0.592824", "0.59234166", "0.5918338", "0.59107876", "0.58846885", "0.5864855", "0.5862119", "0.58309174", "0.58287567", "0.5826", "0.58171713", "0.5770503", "0.57511854", "0.57383704", "0.57218134", "0.5628263", "0.562464", "0.5590188", "0.5575424" ]
0.7499323
0
This endpoint allows clients to GET the max sensor reading for a device.
def request_device_readings_max(device_uuid):
    # Set the db that we want and open the connection
    if app.config['TESTING']:
        conn = sqlite3.connect('test_database.db')
    else:
        conn = sqlite3.connect('database.db')
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    # Execute the query
    cur.execute('select MAX(value) from readings where device_uuid="{}"'.format(device_uuid))
    rows = cur.fetchall()
    # Return the JSON
    return jsonify([dict(zip(['value'], row)) for row in rows]), 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_device_readings_max(self):\n request = self.client().get('/devices/{}/readings/max/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 100)", "def request_device_readings_max(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT *, MAX(r.value) from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n # Return the JSON\n return jsonify(dict(zip(['device_uuid', 'type', 'value', 'date_created'], row))), 200", "def get_max_readings( self ):\n return 2500", "def test_device_readings_min(self):\n request = self.client().get('/devices/{}/readings/min/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 22)", "def read(self):\n # One method of getting a resource is calling get_resource from the client instance. get_resource\n # takes the lwm2m uri string as a parameter. The uri is the object id, then the instance id, then\n # the resource id.\n max_resource = lwm2m_client.get_resource(\"3323/1/5602\")\n # Resources can also be accessed using the index operator from the client instance.\n min_resource = lwm2m_client[3323][1][5601]\n \n pressure = self.pressure_sensor.read_psi()\n \n max_resource.value = max(max_resource.value, pressure)\n min_resource.value = min(min_resource.value, pressure)\n logger.debug(\"PressureValue read called: pressure = {}, max = {}, min = {}\".format(pressure, max_resource.value, min_resource.value))\n return pressure", "def temperature_limit(self):\n return self._read(MX_TEMPERATURE_LIMIT)", "def request_device_readings_min(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select min(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def request_device_readings_mode(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n sql += 'GROUP BY r.value ORDER BY COUNT(*) DESC LIMIT 1'\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n return str(row[0]), 200", "def max_temperature(self):\n mini, maxi = ct.c_int(), ct.c_int()\n self.lib.GetTemperatureRange(ct.pointer(mini), ct.pointer(maxi))\n return maxi.value", "def read(self, max=None, min=1):\r\n for result in self.readAsync(max, min):\r\n pass\r\n return result", "def get_sensor(userid, deviceid, sensorid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors/{}\".format(sensorid))\n return make_response(sensor_response.content, sensor_response.status_code)", "def get_sensors(userid, deviceid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors\", json=request.json)\n return make_response(sensor_response.content, sensor_response.status_code)", "def max_voltage_limit(self):\n return self._read(MX_MAX_VOLTAGE_LIMIT)", "def test_device_readings_mean(self):\n request = self.client().get('/devices/{}/readings/mean/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 61)", "def request_device_readings_median(device_uuid):\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('''\n SELECT AVG(value) FROM (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n '''.format(device_uuid, device_uuid, device_uuid))\n rows = cur.fetchall()\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def get_max_item(self):\n return self._get_page('maxitem').json()", "def get_peak(self):\r\n \r\n sensor_1_list = []\r\n\r\n for i in self.return_data:\r\n sensor_1_list.append(i[0])\r\n\r\n sensor_peak = max(sensor_1_list)\r\n \r\n return(sensor_peak)", "def get_max(self):\n return self.serie.max()", "def test_device_readings_median(self):\n request = self.client().get('/devices/{}/readings/median/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 61)", "def _get_maximum(self):\n return self._maximum", "def test_device_readings_get_humidity(self):\n request = self.client().get('/devices/{}/readings/?type=humidity'.format(self.device_uuid))\n\n self.assertEqual(len(request.json), 1)", "def get_number_of_devices(self):\n return self.drt_manager.get_number_of_devices()", "def getEnergyUsage():\n energy_data = asyncio.run(plug.get_emeter_realtime())\n\n return energy_data", "def request_device_readings_mean(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select AVG(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def max_humidity(self):\n return 60", 
"def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")", "def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")", "def request_device_readings_median(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n sql += 'ORDER BY r.value'\n\n # Execute the query\n cur.execute(sql, params)\n rows = [row[0] for row in cur.fetchall()]\n\n if len(rows) == 0:\n return 'No results found', 200\n\n return str(median(rows)), 200", "def getAltitudeReadings():\n return RoboCaller().call(\"getAltitudeReadings\", \"int\")" ]
[ "0.7779371", "0.75452", "0.6591695", "0.6190245", "0.6057066", "0.59755903", "0.5965035", "0.58626163", "0.58388036", "0.5802646", "0.5797329", "0.57818156", "0.5768744", "0.5755819", "0.5721714", "0.5696872", "0.56544673", "0.56484824", "0.564793", "0.56272566", "0.56242967", "0.562079", "0.56003195", "0.55873203", "0.5555696", "0.5555073", "0.5540249", "0.5540249", "0.5529533", "0.55208874" ]
0.779173
0
This endpoint allows clients to GET the median sensor reading for a device.
def request_device_readings_median(device_uuid):
    # Set the db that we want and open the connection
    if app.config['TESTING']:
        conn = sqlite3.connect('test_database.db')
    else:
        conn = sqlite3.connect('database.db')
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    # Execute the query
    cur.execute('''
        SELECT AVG(value) FROM (
            SELECT value FROM readings where device_uuid="{}" ORDER BY value LIMIT 2 - (
                SELECT COUNT(*) FROM readings where device_uuid="{}"
            ) % 2 OFFSET (
                SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid="{}"
            )
        )
    '''.format(device_uuid, device_uuid, device_uuid))
    rows = cur.fetchall()
    # Return the JSON
    return jsonify([dict(zip(['value'], row)) for row in rows]), 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_device_readings_median(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n sql += 'ORDER BY r.value'\n\n # Execute the query\n cur.execute(sql, params)\n rows = [row[0] for row in cur.fetchall()]\n\n if len(rows) == 0:\n return 'No results found', 200\n\n return str(median(rows)), 200", "def test_device_readings_median(self):\n request = self.client().get('/devices/{}/readings/median/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 61)", "def request_device_readings_mean(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select AVG(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def request_device_readings_mean(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT AVG(r.value) from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n return str(row[0]), 200", "def test_device_readings_mean(self):\n request = self.client().get('/devices/{}/readings/mean/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 61)", "def on_get(self, req, resp):\n task = get_median_for_last_min.delay(time.time())\n\n result_url = os.path.join(\n os.environ['MEDIAN_API_URL'], 'result', task.id)\n resp.body = json.dumps({'result_url': result_url})\n resp.status = falcon.HTTP_200", "def get_median(self):\n return self.serie.median()", "def median(self):\n return self._lift(\"median\")", "def get_sensor(userid, deviceid, sensorid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors/{}\".format(sensorid))\n return make_response(sensor_response.content, sensor_response.status_code)", "def Median(data):\n return data.median()", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n readings = MeterReading.objects.filter(\n unit=unit_id,\n date__gte=start_date,\n date__lte=end_date,\n reading_type='ELECTRICITY'\n )\n serializer = MeterReadingSerializer(readings, many=True)\n return Response(serializer.data)", "def get_median(self):\n med_value= self.df[self.col_name].median()\n return med_value", "def request_device_readings_max(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select MAX(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def request_device_readings_max(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT *, MAX(r.value) from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n # Return the JSON\n return jsonify(dict(zip(['device_uuid', 'type', 'value', 'date_created'], row))), 200", "def get_sensors(userid, deviceid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors\", json=request.json)\n return make_response(sensor_response.content, sensor_response.status_code)", "def median(self):\n return self._summarize(lambda c: c.median)", "def median(self) -> \"Stream[float]\":\n return self.agg(np.median).astype(\"float\")", "def request_device_readings_quartiles(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n if not start:\n return 'error on the required start data', 400\n end = post_data.get('end', None)\n if not end:\n return 'error on the required end data', 400\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? AND r.device_uuid = ? AND r.date_created >= ? AND r.date_created <= ?'\n params = [type, device_uuid, start, end]\n\n sql += 'ORDER BY r.value'\n\n # Execute the query\n cur.execute(sql, params)\n rows = [row[0] for row in cur.fetchall()]\n\n mid = len(rows) // 2\n\n if (len(rows) % 2 == 0):\n # even\n lowerQ = median(rows[:mid])\n upperQ = median(rows[mid:])\n else:\n # odd\n lowerQ = median(rows[:mid]) # same as even\n upperQ = median(rows[mid + 1:])\n\n return str(lowerQ) + \",\" + str(upperQ), 200", "def request_device_readings_mode(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n sql += 'GROUP BY r.value ORDER BY COUNT(*) DESC LIMIT 1'\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n return str(row[0]), 200", "def median(self) -> Union[int, float]:\n return self._data.median()", "def request_device_readings_min(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select min(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def median(self):\n # TO DO\n pass", "def request_device_readings_min(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT *, MIN(r.value) from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n # Return the JSON\n return jsonify(dict(zip(['device_uuid', 'type', 'value', 'date_created'], row))), 200", "def get_meter_data(query):\n\n logger.debug(\"sMap: Getting meter data...\")\n r = requests.post(url, data=query)\n logger.debug(\"%s\", r)\n payload = r.json()\n logger.debug(\"%s\", payload)\n\n return payload", "def estimate_median(self):\n return self.estimate_percentile(0.5)", "def get_data(url):\n\n # Splits the URL and gets the params\n split_url = url.split(\"/\")\n\n if len(split_url) > 5 or len(split_url) < 4:\n raise ValueError(\n \"The input URL contains too many params. You sure this is a valid URL?\"\n )\n\n name = None\n id = None\n\n # If the `name` wasn't provided we scrape for it\n if len(split_url) == 4:\n id = split_url[3]\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n name_element = soup.find(\"input\", {\"id\": \"pspi\"})\n if name_element:\n name = name_element[\"value\"]\n else:\n name = split_url[3]\n id = split_url[4]\n\n if not name or not id:\n raise ValueError(\n \"The input URL did not contain a valid ID or name. You sure this is a valid URL?\"\n )\n\n # Then we request the API URL with the 2 params\n r = requests.get(\n \"https://stats.uptimerobot.com/api/getMonitor/{}?m={}\".format(name, id)\n )\n d = json.loads(r.text)\n\n if \"monitor\" not in d:\n raise ValueError(\"Name or ID not valid. 
You sure this is a valid URL?\")\n\n sorted_values = sorted(d[\"monitor\"][\"responseTimes\"], key=lambda k: k[\"value\"])\n\n results = {\"min\": {}, \"max\": {}, \"median\": None}\n\n results[\"max\"] = {\n \"datetime\": sorted_values[len(sorted_values) - 1][\"datetime\"],\n \"value\": sorted_values[len(sorted_values) - 1][\"value\"],\n }\n\n results[\"min\"] = {\n \"datetime\": sorted_values[0][\"datetime\"],\n \"value\": sorted_values[0][\"value\"],\n }\n\n # This is a faster way to compute median as the array is already sorted.\n index = (len(sorted_values) - 1) // 2\n\n if len(sorted_values) % 2:\n results[\"median\"] = sorted_values[index][\"value\"]\n else:\n results[\"median\"] = (\n sorted_values[index][\"value\"] + sorted_values[index + 1][\"value\"]\n ) / 2.0\n\n return results", "def untruncatedMedian(self):\n return self._distribution.untrMedian()", "def test_device_readings_get_humidity(self):\n request = self.client().get('/devices/{}/readings/?type=humidity'.format(self.device_uuid))\n\n self.assertEqual(len(request.json), 1)", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n readings = MeterReading.objects.filter(\n unit=unit_id,\n date__gte=start_date,\n date__lte=end_date,\n reading_type='GAS'\n )\n serializer = MeterReadingSerializer(readings, many=True)\n return Response(serializer.data)", "def median(self):\n self.data.sort()\n\n if len(self.data) % 2 == 1:\n median = self.data[int(self.size/2)]\n else:\n median = (self.data[int(self.size/2 - 1)] + \n self.data[int(self.size/2)]) / 2\n return median" ]
[ "0.7735845", "0.7634895", "0.67608035", "0.64864635", "0.63579154", "0.6352341", "0.6207965", "0.618097", "0.6035242", "0.59368026", "0.59210455", "0.5914636", "0.5910662", "0.58867925", "0.5876195", "0.582881", "0.5785498", "0.5767511", "0.5755149", "0.5734404", "0.57133216", "0.56827587", "0.5585946", "0.554797", "0.554708", "0.55370086", "0.55276555", "0.5523847", "0.5449022", "0.54238445" ]
0.7654329
1
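The nested-query approach in request_device_readings_median above leans on SQLite itself to pick out the middle row(s). A minimal alternative sketch, assuming Python 3, the same readings table and columns as the record above, and a hypothetical db_path argument standing in for the app.config lookup, fetches the values with a bound parameter and lets statistics.median do the work:

import sqlite3
from statistics import median

def median_reading(db_path, device_uuid):
    # Pull every value for the device with a bound parameter, then
    # compute the median in Python instead of in nested SQL.
    conn = sqlite3.connect(db_path)
    try:
        cur = conn.execute(
            'SELECT value FROM readings WHERE device_uuid = ? ORDER BY value',
            (device_uuid,),
        )
        values = [row[0] for row in cur.fetchall()]
    finally:
        conn.close()
    return median(values) if values else None

Passing device_uuid as a bound parameter also avoids interpolating it into the SQL string the way the route above does with str.format.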
This endpoint allows clients to GET the mean sensor readings for a device.
def request_device_readings_mean(device_uuid):
    # Set the db that we want and open the connection
    if app.config['TESTING']:
        conn = sqlite3.connect('test_database.db')
    else:
        conn = sqlite3.connect('database.db')
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    # Execute the query
    cur.execute('select AVG(value) from readings where device_uuid="{}"'.format(device_uuid))
    rows = cur.fetchall()
    # Return the JSON
    return jsonify([dict(zip(['value'], row)) for row in rows]), 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_device_readings_mean(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT AVG(r.value) from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n return str(row[0]), 200", "def test_device_readings_mean(self):\n request = self.client().get('/devices/{}/readings/mean/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 61)", "def get_sensors(userid, deviceid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors\", json=request.json)\n return make_response(sensor_response.content, sensor_response.status_code)", "def request_device_readings_median(device_uuid):\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('''\n SELECT AVG(value) FROM (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n '''.format(device_uuid, device_uuid, device_uuid))\n rows = cur.fetchall()\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def get_sensor(userid, deviceid, sensorid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors/{}\".format(sensorid))\n return make_response(sensor_response.content, sensor_response.status_code)", "def mean_sensor_id_get(sensor_id, start_date=None, end_date=None): # noqa: E501\n try:\n client = InfluxDBClient('influxdb', 8086, 'user', 'user', 'sensor')\n sensor_id = \"laptop_temperature_1\"\n str = \"\"\n if start_date is not None:\n str = f\"WHERE time > '{datetime.fromtimestamp(start_date)}'\"\n if end_date is not None:\n if len(str) > 0:\n str += \" AND \"\n else:\n str = \"WHERE \"\n str += f\"time < '{datetime.fromtimestamp(end_date)}'\"\n request = f\"SELECT mean({sensor_id}) from client1 {str} GROUP BY *;\"\n print(request)\n result = client.query(request)\n mean = list(result.get_points())[0]['mean']\n except:\n traceback.print_exc()\n return []\n return [mean]", "def request_device_readings_min(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select min(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row 
in rows]), 200", "def request_device_readings_median(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n sql += 'ORDER BY r.value'\n\n # Execute the query\n cur.execute(sql, params)\n rows = [row[0] for row in cur.fetchall()]\n\n if len(rows) == 0:\n return 'No results found', 200\n\n return str(median(rows)), 200", "def get_all_sensors():\n\tquery_url = 'http://localhost:8079/api/query'\n\tquery = \"select *\"\n\tr = requests.post(query_url, query)\n\treturn r.content", "def request_device_readings_mode(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n sql += 'GROUP BY r.value ORDER BY COUNT(*) DESC LIMIT 1'\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n return str(row[0]), 200", "def request_device_readings_min(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT *, MIN(r.value) from readings r WHERE r.type = ? 
AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n # Return the JSON\n return jsonify(dict(zip(['device_uuid', 'type', 'value', 'date_created'], row))), 200", "def GET_sensors(self):\n self.sensors.GetAll()\n self.sensors.sensors['charging-state'] = \\\n pyrobot.CHARGING_STATES[self.sensors.sensors['charging-state']]\n print simplejson.dumps(self.sensors.sensors)", "def get_patient_average():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/average/2\")\n print(r.text)", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n readings = MeterReading.objects.filter(\n unit=unit_id,\n date__gte=start_date,\n date__lte=end_date,\n reading_type='ELECTRICITY'\n )\n serializer = MeterReadingSerializer(readings, many=True)\n return Response(serializer.data)", "def get(self, request, pk):\n sensor_obj = get_object_or_404(SensorData, id=pk)\n serializer = SensorDataSerializer(sensor_obj)\n return Response(serializer.data, 200)", "def sensor(self , sensor_index):\n sensor = obd_sensors.SENSORS[sensor_index]\n try:\n r = self.get_sensor_value(sensor)\n except \"NORESPONSE\":\n r = \"NORESPONSE\"\n return (sensor.name,r, sensor.unit)", "def index(request):\n\tif request.method == 'GET':\n\t\treturn_json = json.loads(get_all_sensors())\n\t\treturn Response(return_json)", "def sensors():\n sensor_data = query_db('SELECT * FROM sensors')\n return jsonify(results=sensor_data)", "def get_sensor_consumption(self, service_location_id, sensor_id, start, end, aggregation):\n url = urljoin(URLS['servicelocation'], str(service_location_id), \"sensor\", str(sensor_id), \"consumption\")\n return self._get_consumption(url=url, start=start, end=end, aggregation=aggregation)", "def test_device_readings_min(self):\n request = self.client().get('/devices/{}/readings/min/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 22)", "def test_device_readings_get_temperature(self):\n request = self.client().get('/devices/{}/readings/?type=temperature'.format(self.device_uuid))\n\n self.assertEqual(len(request.json), 3)", "def read_sensor_raw(self):\n return self.read_sensor()", "def read_sensor_raw(self):\n return self.read_sensor()", "def get(self, request, *args, **kwargs):\n device = Device.objects.get(name=kwargs[\"device_name\"])\n global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\")\n status_code, data = graph_ql_query(request, device, global_settings.sot_agg_query)\n data = json.loads(json.dumps(data))\n return Response(GraphQLSerializer(data=data).initial_data, status=status_code)", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n readings = MeterReading.objects.filter(\n unit=unit_id,\n date__gte=start_date,\n date__lte=end_date,\n reading_type='GAS'\n )\n serializer = MeterReadingSerializer(readings, many=True)\n return Response(serializer.data)", "def request_device_readings(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n \n if request.method == 'POST':\n # 
Grab the post parameters\n post_data = json.loads(request.data)\n sensor_type = post_data.get('type')\n value = post_data.get('value')\n date_created = post_data.get('date_created', int(time.time()))\n\n # Insert data into db\n cur.execute('insert into readings (device_uuid,type,value,date_created) VALUES (?,?,?,?)',\n (device_uuid, sensor_type, value, date_created))\n \n conn.commit()\n\n # Return success\n return 'success', 201\n else:\n # Execute the query\n cur.execute('select * from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['device_uuid', 'type', 'value', 'date_created'], row)) for row in rows]), 200", "def request_device_readings_max(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n end = post_data.get('end', None)\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT *, MAX(r.value) from readings r WHERE r.type = ? AND r.device_uuid = ?'\n params = [type, device_uuid]\n if start:\n sql += 'AND r.date_created >= ?'\n params += [start]\n if end:\n sql += 'AND r.date_created <= ?'\n params += [end]\n\n # Execute the query\n cur.execute(sql, params)\n row = cur.fetchone()\n\n if not row[0]:\n return 'No results found', 200\n\n # Return the JSON\n return jsonify(dict(zip(['device_uuid', 'type', 'value', 'date_created'], row))), 200", "def request_device_readings_max(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select MAX(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200", "def getSensors(self):\n return self.listener.sensors", "def test_device_readings_get_humidity(self):\n request = self.client().get('/devices/{}/readings/?type=humidity'.format(self.device_uuid))\n\n self.assertEqual(len(request.json), 1)" ]
[ "0.7780582", "0.7480308", "0.6766443", "0.67064244", "0.6468211", "0.64030886", "0.6369193", "0.63479865", "0.6325126", "0.6304141", "0.61911434", "0.6138687", "0.61250997", "0.60851073", "0.60354185", "0.6026088", "0.5993065", "0.5972872", "0.5889185", "0.5886066", "0.58598566", "0.5825794", "0.5825794", "0.5824541", "0.58204234", "0.5798609", "0.57803535", "0.5775738", "0.57755697", "0.5679036" ]
0.779958
0
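The mean route above interpolates device_uuid directly into the SQL text. A rough sketch of the same AVG() aggregation with bound parameters and an optional time window; db_path, start, and end are illustrative assumptions, not part of the route above:

import sqlite3

def mean_reading(db_path, device_uuid, start=None, end=None):
    # AVG() over the device's readings, optionally restricted to a
    # date_created window; every value is passed as a bound parameter.
    sql = 'SELECT AVG(value) FROM readings WHERE device_uuid = ?'
    params = [device_uuid]
    if start is not None:
        sql += ' AND date_created >= ?'
        params.append(start)
    if end is not None:
        sql += ' AND date_created <= ?'
        params.append(end)
    conn = sqlite3.connect(db_path)
    try:
        row = conn.execute(sql, params).fetchone()
    finally:
        conn.close()
    return row[0]  # AVG() yields None when no rows match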
Removes files older than a given age limit (in days) from a directory
def removeOldFiles(directory, daysLimit):
    files = os.listdir(directory)
    files = [f for f in files if re.search('.tif$', f, re.I)]
    # Cutoff: anything modified more than daysLimit days ago is removed
    now = time.time()
    statusRemoval = 0
    for file in files:
        if os.stat(directory + file).st_mtime < now - daysLimit * 86400:
            if os.path.isfile(directory + file):
                os.remove(os.path.join(directory, file))
                print "Old file deleted: ", file
    return statusRemoval
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_old_entries(captchas_dir, max_age=1200):\n if access(captchas_dir, W_OK):\n basetime = datetime.datetime.now() - datetime.timedelta(seconds=max_age)\n for dname in listdir(captchas_dir):\n d = path.join(captchas_dir, dname)\n if basetime > datetime.datetime.fromtimestamp(stat(d).st_mtime):\n try:\n for f in listdir(d):\n remove(path.join(d, f))\n rmdir(d)\n except:\n pass", "def remove_old_files(root, namepattern='*', maxfiles=1):\n if maxfiles < 0:\n raise ValueError('maxfiles must be >= 0, got %s' % maxfiles)\n\n lstFiles = []\n for f in _os.listdir(root):\n if _fnmatch.fnmatch(f, namepattern):\n fileName = _os.path.join(root, f)\n lstFiles.append(fileName)\n lstFiles.sort(key=_os.path.getmtime, reverse=True)\n for f in lstFiles[maxfiles:]:\n try:\n _os.remove(f)\n except (OSError, IOError):\n pass", "def purge_old(directory: Path, extension: str, threshold_age_days=60):\n found = 0\n now = time.time()\n for node in directory.iterdir():\n if node.suffix == f\".{extension}\":\n file_modified = node.stat().st_mtime\n if (now - file_modified) // (24 * 3600) >= threshold_age_days:\n node.unlink()\n found += 1\n\n if found > 0:\n print(col.bold_green(f\" cleaned-up {found} {extension} files older than: {threshold_age_days} in {directory}\"))", "def remove_older_backups(days=30):\n oldest = arrow.now().shift(days=-30).timestamp\n files = [os.path.join(app.config['BACKUP_DIR'], x) for x in os.listdir(app.config['BACKUP_DIR']) if re.search(r'backup-(\\d{4}).zip', x, re.IGNORECASE)]\n for fpath in files:\n s = os.stat(fpath)\n if s.st_ctime < oldest:\n print(\"deleting\", fpath)\n os.unlink(fpath)", "def removeOldFiles(directory, time):\n modTime = 60 * 60 * 24 * 4 # max number of seconds to keep aux files\n for file in listdir(directory):\n if time - path.getmtime(path.join(directory, file)) > modTime and\\\n file != 'reveal.js':\n try:\n remove(path.join(directory, file))\n except OSError:\n pass", "def clear_outdated_files():\n for f in os.listdir(MEDIA_ROOT):\n file_path = os.path.join(MEDIA_ROOT, f)\n if os.path.isfile(file_path) and os.stat(file_path).st_mtime < time.time() - STORE_PDF_DAYS * 86400:\n os.remove(file_path)", "def trim_dir(directory: str) -> None:\n\n def access_time(f: str) -> float:\n return os.stat(os.path.join(directory, f)).st_atime\n\n files = sorted(os.listdir(directory), key=access_time)\n file_name = os.path.join(directory, files[0])\n logger.debug('removing least accessed file: {}', file_name)\n os.remove(file_name)", "def remove_files(max_age_sec):\n with session_transaction() as session:\n nb_deleted = File.remove_old_files(max_age_sec, session)\n log.debug(\"Max_age_sec: %s Nb_deleted: %s\", max_age_sec, nb_deleted)\n return nb_deleted", "def remove_expired_files():\n from models import FlowFile\n FlowFile.objects.filter(\n state__in=[FlowFile.STATE_UPLOADING, FlowFile.STATE_UPLOAD_ERROR],\n updated__lte=datetime.datetime.date() - datetime.timedelta(days=FLOWJS_EXPIRATION_DAYS)\n ).delete()", "def main():\n dir_path = '/home/ubuntu/test_files' # path for the log files that needs to be pruned\n stat_file_name = 'file_status_info' # temp file will be created to store the stat of each files to calculate when to delete\n \n # Get the list of all the files where we want to perfrom the delete operations\n file_list = get_list_of_files_in_dir(dir_path)\n\n # Get the current system date\n current_date = get_current_date()\n\n # Iterate through all the log, error, info files in the specified directory path and check for the criteria of file older than 5 days 
and delete.\n for fil in file_list:\n get_file_stat(dir_path, stat_file_name, fil)\n filename, file_date = get_file_last_modification_date(stat_file_name)\n\n print(\"*********** %s file stat is written **************\" % fil)\n days = abs(current_date - file_date).days\n \n # Check if the file modification date if older than 5 days.\n if days > 5:\n remove_files(os.path.join(dir_path, fil))\n else:\n print(\"No eligible file(s) found to be deleted\")", "def remove_old_ftp_downloads(folder):\r\n date_now = datetime.datetime.utcnow()\r\n all_paths = glob(os.path.join(folder,'Runoff*netcdf*'))\r\n for path in all_paths:\r\n\tdate_file = datetime.datetime.strptime(os.path.basename(path).split('.')[1],'%Y%m%d')\r\n if os.path.isdir(path):\r\n rmtree(path)\r\n else:\r\n os.remove(path)\r\n\tif date_now - date_file < datetime.timedelta(1):\r\n\t os.mkdir(path)", "def remove_files(path, days=7, exclude=[], older_than=True, test=False, subdirs=False):\n\n # get removal date and operator\n remove_after = datetime.now() - relativedelta(days=days)\n op = operator.lt\n if not older_than:\n op = operator.gt\n\n # optional test\n if test:\n 'print testing....\\n'\n def remove(*args): pass\n else:\n def remove(*args):\n os.remove(args[0])\n\n # walk thru directory\n for root, dirs, files in os.walk(path):\n if not root.endswith('.gdb'):\n for f in files:\n if not f.lower().endswith('.lock'):\n if not any(map(lambda ex: fnmatch.fnmatch(f, ex), exclude)):\n last_mod = datetime.fromtimestamp(os.path.getmtime(os.path.join(root, f)))\n\n # check date\n if op(last_mod, remove_after):\n try:\n remove(os.path.join(root, f))\n print 'deleted: \"{0}\"'.format(os.path.join(root, f))\n except:\n print '\\nCould not delete: \"{0}\"!\\n'.format(os.path.join(root, f))\n else:\n print 'skipped: \"{0}\"'.format(os.path.join(root, f))\n else:\n print 'excluded: \"{0}\"'.format(os.path.join(root, f))\n else:\n print 'file is locked: \"{0}\"'.format(os.path.join(root, f))\n else:\n print 'excluded files in: \"{0}\"'.format(root)\n\n # break or continue if checking sub-directories\n if not subdirs:\n break\n\n return", "def deleteOldFolder(self, folderPath, maxAgeMinutes):\n folderPath = os.path.abspath(folderPath)\n print(\"deleting old files in:\", folderPath)\n imageFiles = glob.glob(folderPath+\"/*.jpg\")\n imageFiles += glob.glob(folderPath+\"/*.png\")\n imageFiles += glob.glob(folderPath+\"/*.gif\")\n for fname in sorted(imageFiles):\n bn = os.path.basename(fname).split(\".\")\n if len(bn) < 2:\n continue\n stampNow = self.timeCode()\n stampThen = bn[1]\n ageMinutes = self.timestampAgeMinutes(stampThen, stampNow)\n if ageMinutes > maxAgeMinutes:\n self.log(\"deleting old:\"+fname)\n os.remove(fname)", "def delfiles(self, max=0, min=0):\r\n # Removes the files from min up to max\r\n for i in range(max-1, min-1, -1):\r\n if i<len(self._filelist):\r\n del self._filelist[i]\r\n if i<len(self._energy):\r\n del self._energy[i]", "def delete_files(logger, folder, days=90):\n folder = Path(folder)\n parent_folder = str(folder.parent).strip(os.sep)\n base_folder = os.path.basename(parent_folder)\n count = 0\n for file in folder.glob(\"*\"):\n if not file.is_dir():\n file_modified = datetime.fromtimestamp(file.lstat().st_mtime)\n file_date = file_modified.strftime(\"%H:%M %d-%b-%Y\")\n if datetime.now() - file_modified > timedelta(days):\n count += 1\n try:\n file.unlink()\n logger.info(f\"deleted {file.name} from {base_folder}\")\n except Exception as e:\n logger.info(f\"Could not delete file! 
Error message:\\n{e}\")", "def clean_cache(self):\n cachedir = self.config.get('cachedir', self.CACHEDIR)\n if os.path.exists(cachedir):\n for file_name in os.listdir(cachedir):\n path = os.path.join(cachedir, file_name)\n cache_time = int(\n self.config.get(\n 'clean_cache_after',\n self.CLEAN_CACHE_AFTER\n )\n )\n max_cache_time = 60 * 60 * cache_time\n if time.time() > os.path.getmtime(path) + max_cache_time:\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(os.path.join(cachedir, file_name))", "def deleteOld(self, maxAgeMinutes=60*2):\n self.deleteOldFolder(self.downloadFolder, maxAgeMinutes)\n self.deleteOldFolder(self.thumbnailFolder, maxAgeMinutes)\n self.deleteOldFolder(self.averagesFolder, maxAgeMinutes)", "def cleanup_folder(\r\n folder_path: str,\r\n max_file_size_mb: int = 50,\r\n last_file_usage: int = 3,\r\n replace_with_info: bool = True,\r\n excluded_folders: List[str] = None,\r\n):\r\n total_cleaned_up_mb = 0\r\n removed_files = 0\r\n\r\n for dirname, subdirs, files in os.walk(folder_path):\r\n if excluded_folders:\r\n for excluded_folder in excluded_folders:\r\n if excluded_folder in subdirs:\r\n log.debug(\"Ignoring folder because of name: \" + excluded_folder)\r\n subdirs.remove(excluded_folder)\r\n for filename in files:\r\n file_path = os.path.join(dirname, filename)\r\n\r\n file_size_mb = int(os.path.getsize(file_path) / (1024.0 * 1024.0))\r\n if max_file_size_mb and max_file_size_mb > file_size_mb:\r\n # File will not be deleted since it is less than the max size\r\n continue\r\n\r\n last_file_usage_days = None\r\n if get_last_usage_date(file_path):\r\n last_file_usage_days = (\r\n datetime.now() - get_last_usage_date(file_path)\r\n ).days\r\n\r\n if last_file_usage_days and last_file_usage_days <= last_file_usage:\r\n continue\r\n\r\n current_date_str = datetime.now().strftime(\"%B %d, %Y\")\r\n removal_reason = (\r\n \"File has been removed during folder cleaning (\"\r\n + folder_path\r\n + \") on \"\r\n + current_date_str\r\n + \". \"\r\n )\r\n if file_size_mb and max_file_size_mb:\r\n removal_reason += (\r\n \"The file size was \"\r\n + str(file_size_mb)\r\n + \" MB (max \"\r\n + str(max_file_size_mb)\r\n + \"). \"\r\n )\r\n\r\n if last_file_usage_days and last_file_usage:\r\n removal_reason += (\r\n \"The last usage was \"\r\n + str(last_file_usage_days)\r\n + \" days ago (max \"\r\n + str(last_file_usage)\r\n + \"). \"\r\n )\r\n\r\n log.info(filename + \": \" + removal_reason)\r\n\r\n # Remove file\r\n try:\r\n os.remove(file_path)\r\n\r\n if replace_with_info:\r\n with open(file_path + \".removed.txt\", \"w\") as file:\r\n file.write(removal_reason)\r\n\r\n if file_size_mb:\r\n total_cleaned_up_mb += file_size_mb\r\n\r\n removed_files += 1\r\n\r\n except Exception as e:\r\n log.info(\"Failed to remove file: \" + file_path, e)\r\n\r\n log.info(\r\n \"Finished cleaning. Removed \"\r\n + str(removed_files)\r\n + \" files with a total disk space of \"\r\n + str(total_cleaned_up_mb)\r\n + \" MB.\"\r\n )", "def purge_htmlfiles(args, posts):\n htmlist = list_of_htmlfiles(args, posts)\n html_to_remove = list()\n for fullname in glob.glob(os.path.join(args.root, '*.htm*')):\n if fullname not in htmlist:\n html_to_remove.append(fullname)\n\n if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? 
').lower()\n if inpt == 'n':\n return\n\n for name in html_to_remove:\n print('Removing html files', name)\n os.remove(name)", "def delete_files_to_insert_new_file(self, new_file_size: int):\n cumulative_files_size = 0\n for current_file_hash in self.usage_queue:\n # Find the least recently used file\n current_file_size = len(self.storage[current_file_hash])\n cumulative_files_size += current_file_size\n\n # Removes one file\n if current_file_size + self.get_free_space_size() >= new_file_size:\n self.remove_file_from_cache(current_file_hash)\n\n break\n\n # Removes multiple files\n elif cumulative_files_size + self.get_free_space_size() >= new_file_size:\n files_to_delete = [ # Get the least recently used files up to this file (including)\n file for file in itertools.islice(\n self.usage_queue,\n self.usage_queue.index(current_file_hash) + 1\n )\n ]\n for file in files_to_delete:\n self.remove_file_from_cache(file)\n break", "def cleanStamps(self, criteria):\n dirEmpty = True\n for s in self.iterStamps():\n if criteria(s):\n os.unlink(self.getFile(s))\n else:\n dirEmpty = False\n try:\n os.rmdir(self.path)\n except OSError:\n pass", "def remove_old_logs():\r\n three_days_old = dt.date.today() - dt.timedelta(days=2)\r\n three_days_ago = three_days_old.strftime('%Y%m%d')\r\n\r\n for f in os.listdir(ANCILS_DIR):\r\n if not f.startswith(('model_configs_latest.txt', 'model_configs-2019-11-02.txt')):\r\n file_date = f.strip('.txt').split('_')[2].replace(\"-\",\"\")\r\n\r\n if not file_date.endswith('01'):\r\n if int(file_date) < int(three_days_ago):\r\n cmd1 = \"git add {}\".format(os.path.join(ANCILS_DIR, f))\r\n subprocess.run(cmd1, shell=True)\r\n cmd = \"git rm -f {}\".format(os.path.join(ANCILS_DIR, f))\r\n subprocess.run(cmd, shell=True)", "def truncate(self):\n for file_name in os.listdir(self.path):\n if file_name[0:4] == 'data':\n os.remove(self.path + '/' + file_name)\n self.current_row = 0", "def main(directory=\"/images\"):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n parent_dir = os.path.dirname(dir_path)\n threshold = 5000\n start = time.time()\n num_images = 0\n for fn in os.listdir(parent_dir + directory):\n num_images += 1\n full_path = parent_dir + directory + \"/\" + fn\n if os.stat(full_path).st_size < threshold:\n os.remove(full_path)\n print(\"deleted file\", fn)\n print(\"Total files\", num_images)\n curr_time = time.time()\n print(\"Time run so far\", round(curr_time - start, \"\\n\"))\n end = time.time()\n print(\"Time to delete files:\", round(end - start), \"seconds\")\n print(\"Number of images\", num_images)", "def __purge_old_files(self):\n\n chkpts = self.checkpointer.sorted_checkpoints()\n p_chkpts = []\n e_chkpts = []\n for c in chkpts:\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.PERIODIC_PREFIX):\n p_chkpts.append(c)\n\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.EPOCH_PREFIX):\n e_chkpts.append(c)\n\n # Delete periodic checkpoints\n if self.max_files is not None and len(p_chkpts) > self.max_files:\n for c in p_chkpts[self.max_files:]:\n log.debug(\"CheckpointingCallback deleting {}\".format(c))\n self.checkpointer.delete(c)\n\n # Delete older epochs\n if self.max_epochs is not None and len(e_chkpts) > self.max_epochs:\n for c in e_chkpts[self.max_epochs:]:\n log.debug(\"CheckpointingCallback deleting (epoch) {}\".format(c))\n self.checkpointer.delete(c)", "def _remove_old_items(self):\n if self.size_limit is not None:\n while len(self) > self.size_limit:\n self.popitem(last=False)", "def 
remove_old_files(filelist):\n\n for filename in filelist:\n if path.exists(filename):\n try:\n remove(filename)\n print \"%s deleted\" % filename \n except Exception: #TODO Exception spesifik.\n stderr.write(\"%s cannot remove. Please check your priviledge\\n\"\n % filename)\n exit(1)", "def _cleanup(self, fnum):\n while os.path.exists('%s.%s' % (self.name, fnum)):\n try:\n fname = '%s.%s' % (self.name, fnum)\n os.unlink(fname)\n # self.log.debug(\"Cleaned up file: %s\", fname)\n except:\n pass\n fnum -= 1", "def delete_file(file):\n for _ in range(10):\n print(\"deleting\", file)\n try:\n if path.isfile(file):\n remove(file)\n except Exception as error:\n sleep(5)\n print('Delete failed, retrying...', error)\n else:\n break", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")" ]
[ "0.7052211", "0.69445324", "0.68056613", "0.6585461", "0.65334916", "0.6289249", "0.62429726", "0.6144015", "0.5986444", "0.59845823", "0.59236914", "0.58389485", "0.581854", "0.5799415", "0.5794532", "0.57340544", "0.5671793", "0.56483847", "0.5646341", "0.56375355", "0.5629638", "0.55624217", "0.5542746", "0.5523278", "0.5518025", "0.55058193", "0.5497412", "0.5496283", "0.5491605", "0.54903185" ]
0.7327303
0
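removeOldFiles above builds paths by string concatenation and filters names with a regular expression. A hedged, roughly equivalent sketch using pathlib, assuming Python 3 and that matching the lowercase .tif extension is enough (the original's re.I match is also case-insensitive):

import time
from pathlib import Path

def remove_old_tifs(directory, days_limit):
    # Delete .tif files whose modification time is older than the cutoff.
    cutoff = time.time() - days_limit * 86400
    removed = []
    for path in Path(directory).glob('*.tif'):
        if path.is_file() and path.stat().st_mtime < cutoff:
            path.unlink()
            removed.append(path.name)
    return removed

The cutoff expression mirrors the original comparison of st_mtime against now - daysLimit * 86400.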
Generate input file for ANN
def genInput(tasksInfo, results, origLocation, destLocation, typeGray, samplSize = 0.10): print '\nbegin of genInput\n' # Training / aplication treina = True verdade = False # Sampling pixels from image sampl = True if sampl == True: buildSampl = True else: buildSampl = False # Write data to file if treina: outInput = open('trainInput.dat', 'w') #~ outInput1par = open('trainInput1par.dat', 'w') outOutput = open('trainOutput.dat', 'w') outTasks = open('trainTasks.dat', 'w') #~ outOutputClass = open('trainOutputClass.dat', 'w') selecOut = open('selected.dat', 'w') else: outInput = open('aplicInput.dat', 'w') outTasks = open('aplicTasks.dat', 'w') #~ outInput1par = open('aplicInput1par.dat', 'w') if verdade: outOutput = open('verdadeOutput.dat', 'w') #~ outOutputClass = open('verdadeOutputClass.dat', 'w') #Setting info on temporary directory for images numberImages = 12 tmpImg = [] for i in range(numberImages): tmpImg.append(destLocation+"tmpImg_n"+str(i+1).zfill(2)+"/") imgFile = [] imgFile.append('2011352') imgFile.append('2011353') imgFile.append('2011355') imgFile.append('2011357') imgFile.append('2011358') imgFile.append('2011359') imgFile.append('2011360') imgFile.append('2011361') imgFile.append('2011362') imgFile.append('2011363') imgFile.append('2011364') imgFile.append('2011365') #If we need to skip line finishLine = True #Getting number of tasks numberTasks = len(tasksInfo) print 'number of tasks: ', numberTasks for task in range(numberTasks): #Geting the selected day for each task taskId = tasksInfo[task]['taskId'] for img in range(numberImages): imgName = tmpImg[img] + str(taskId) + '.tif' #Openning image (and testing) if os.path.exists(imgName) is False: print 'INPUT -> Task miss: ' + str(taskId) + ' Image: ' + str(img) + ' Name: ' + imgName finishLine = False continue print 'INPUT -> Task: ' + str(taskId) + ' Image: ' + str(img) fileSat = gdal.Open(imgName, GA_ReadOnly) if fileSat is None: print 'Could not open ' + imgName sys.exit(1) # Read band values from image rows = fileSat.RasterYSize cols = fileSat.RasterXSize R_band_sat = fileSat.GetRasterBand(1) G_band_sat = fileSat.GetRasterBand(2) B_band_sat = fileSat.GetRasterBand(3) R_data_sat = R_band_sat.ReadAsArray(0, 0, cols, rows) G_data_sat = G_band_sat.ReadAsArray(0, 0, cols, rows) B_data_sat = B_band_sat.ReadAsArray(0, 0, cols, rows) #Closing image fileSat = None #If we are sampling the image, then we'll pick our samples print 'sampl: ', sampl print 'buildSampl: ', buildSampl if ((sampl == True) and (buildSampl == True)): universe = [] samplList = [] random.seed(8225) for i in range(rows): for j in range(cols): universe.append([i,j]) sizeUniverse = len(universe) samplSizeInt = int(samplSize * sizeUniverse) print 'Sampling mode activated.' print 'Using ', samplSizeInt, ' out of ', sizeUniverse, ' pixels.' 
for i in range(samplSizeInt): samplList.append(universe.pop(random.randint(0,len(universe)-1))) buildSampl = False sumValueGray = 0.0 if (sampl == False): #Working with the values for i in range(rows): for j in range(cols): #~ valueString = str(float(R_data_sat[i,j])/255.0)+' '+str(float(G_data_sat[i,j])/255.0)+' '+str(float(B_data_sat[i,j])/255.0) valueGray = rgb2gray((float(R_data_sat[i,j])/255.0),(float(G_data_sat[i,j])/255.0),(float(B_data_sat[i,j])/255.0),typeGray) sumValueGray = sumValueGray + valueGray valueString = str(taskId)+' '+str(valueGray) #~ outInput.write("%s "%valueString) sumValueString = str(taskId)+' '+str(sumValueGray/(rows*cols)) #~ outInput1par.write("%s "%sumValueString) outInput.write("%s "%sumValueString) else: #Working with the sampled values for idx in range(samplSizeInt): i = samplList[idx][0] j = samplList[idx][1] valueGray = rgb2gray((float(R_data_sat[i,j])/255.0),(float(G_data_sat[i,j])/255.0),(float(B_data_sat[i,j])/255.0),typeGray) sumValueGray = sumValueGray + valueGray valueString = str(valueGray) #~ outInput.write("%s "%valueString) sumValueString = str(sumValueGray/samplSizeInt) #~ outInput1par.write("%s "%sumValueString) outInput.write("%s "%sumValueString) #If we did not had a problem with missing task if finishLine == True: #Closing the line of the file outInput.write("\n") #~ outInput1par.write("\n") outTasks.write(str(taskId)+"\n") else: finishLine = True #If we are training (or we know the truth), then we also generate the truth if treina or verdade: selecName = '/home/eduardo/ForestWatchers/ann2besttile/results/tmpMosaic_n0/' + str(taskId) + '.tif' #Openning image (and testing) if os.path.exists(selecName) is False: print 'OUTPUT -> Task miss: ' + str(taskId) continue #~ fileSelec = gdal.Open(selecName, GA_ReadOnly) #~ if fileSelec is None: #~ print 'Could not open ' + selecName #~ sys.exit(1) #~ # Read band values from image #~ rows = fileSelec.RasterYSize #~ cols = fileSelec.RasterXSize #~ R_band_selec = fileSelec.GetRasterBand(1) #~ G_band_selec = fileSelec.GetRasterBand(2) #~ B_band_selec = fileSelec.GetRasterBand(3) #~ R_data_selec = R_band_selec.ReadAsArray(0, 0, cols, rows) #~ G_data_selec = G_band_selec.ReadAsArray(0, 0, cols, rows) #~ B_data_selec = B_band_selec.ReadAsArray(0, 0, cols, rows) #~ #Closing image #~ fileSelec = None #~ #~ if (sampl == False): #~ #Working with the values #~ for i in range(rows): #~ for j in range(cols): #~ valueGray = rgb2gray((float(R_data_selec[i,j])/255.0),(float(G_data_selec[i,j])/255.0),(float(B_data_selec[i,j])/255.0),'gleam') #~ valueString = str(valueGray) #~ outOutput.write("%s "%valueString) #~ else: #~ #Working with the values #~ for idx in range(samplSizeInt): #~ i = samplList[idx][0] #~ j = samplList[idx][1] #~ valueGray = rgb2gray((float(R_data_selec[i,j])/255.0),(float(G_data_selec[i,j])/255.0),(float(B_data_selec[i,j])/255.0),'gleam') #~ valueString = str(valueGray) #~ outOutput.write("%s "%valueString) #~ #~ #Closing line of the file #~ outOutput.write("\n") selectedTile = results[task].index(max(results[task])) if selectedTile == 0: selectedName = str(taskId) + ' 2011352' selectedFile = '1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0' elif selectedTile == 1: selectedName = str(taskId) + ' 2011353' selectedFile = '0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0' elif selectedTile == 2: selectedName = str(taskId) + ' 2011355' selectedFile = '0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0' elif selectedTile == 3: selectedName = str(taskId) + ' 2011357' selectedFile = '0.0 0.0 0.0 1.0 0.0 0.0 
0.0 0.0 0.0 0.0 0.0 0.0' elif selectedTile == 4: selectedName = str(taskId) + ' 2011358' selectedFile = '0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0' elif selectedTile == 5: selectedName = str(taskId) + ' 2011359' selectedFile = '0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0' elif selectedTile == 6: selectedName = str(taskId) + ' 2011360' selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0' elif selectedTile == 7: selectedName = str(taskId) + ' 2011361' selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0' elif selectedTile == 8: selectedName = str(taskId) + ' 2011362' selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0' elif selectedTile == 9: selectedName = str(taskId) + ' 2011363' selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0' elif selectedTile == 10: selectedName = str(taskId) + ' 2011364' selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0' elif selectedTile == 11: selectedName = str(taskId) + ' 2011365' selectedFile = '0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0' #~ outOutputClass.write("%s\n"%selectedFile) outOutput.write("%s\n"%selectedFile) selecOut.write("%s\n"%selectedName) # Close files outInput.close() outTasks.close() #~ outInput1par.close() if treina or verdade: outOutput.close() #~ outOutputClass.close() selecOut.close() statusGenInput = 0 print '\nend of genInput\n' return statusGenInput
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_train_txt(name, path):\n with open(path + '/test.txt', 'a') as file:\n file.write('/content/YOLO_metric/data/obj/' + name + '\\n')", "def train(self, trainfile):", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)", "def _generate_input_file(self):\n if self.input_static:\n return\n\n if self._input_generator_name is None:\n logger.error(\"A testcase has neither a generator nor a static input\")\n self.input_generation_log = \"Generation failed. No generator specified.\"\n self.input_generation_successful = False\n elif self._input_generator is None:\n self.input_generation_log = \"Generation failed. Generator {} not found\".format(\n self._input_generator_name,\n )\n self.input_generation_successful = False\n else:\n generation_command = get_execution_command(self._input_generator.source_language, \"generator\")\n generation_command.extend(shlex.split(self._input_generation_parameters))\n stdout_redirect = \"output.txt\"\n\n try:\n generator_compiled = self._input_generator.compiled_file\n except:\n self.input_generation_log = \"Generation failed. Generator didn't compile. Log: {}\".format(\n self._input_generator.last_compile_log\n )\n self.save()\n return\n\n action = ActionDescription(\n commands=[generation_command],\n executables=[(\"generator\", generator_compiled)],\n stdout_redirect=stdout_redirect,\n output_files=[stdout_redirect],\n time_limit=settings.FAILSAFE_TIME_LIMIT,\n memory_limit=settings.FAILSAFE_MEMORY_LIMIT\n )\n success, execution_success, outputs, sandbox_datas = execute_with_input(action)\n if not success:\n logger.error(\"Generating input for testcase {} failed.\\n Sandbox data:\\n{}\".format(\n str(self),\n str(sandbox_datas[0]))\n )\n self.input_generation_log = \\\n \"System failed to generate the input. \" \\\n \"Check the logs for more details. \" \\\n \"This issue must be resolved by a system administrator\"\n self.input_generation_successful = False\n elif not execution_success:\n self.input_generation_log = \"Generation failed. 
{}.\".format(\n str(sandbox_datas[0])\n )\n self.input_generation_successful = False\n else:\n self._input_generated_file = outputs[stdout_redirect]\n self.input_generation_log = \"Generation successful.\"\n self.input_generation_successful = True\n self.save()", "def create_training_file(D_RAT):\r\n return create_arff_file(D_RAT, 0)", "def build_input_file(self, replica):\n\n file_name = self.inp_basename + \"_\" + \\\n str(replica.id) + \"_\" + \\\n str(replica.cycle) + \".md\"\n\n fo = open(file_name, \"wb\")\n for i in range(1,500):\n fo.write(str(random.randint(i, 500) + i*2.5) + \" \");\n if i % 10 == 0:\n fo.write(str(\"\\n\"));\n fo.close()", "def createInput(dirPath,gSettings):\n \n with open(os.path.join('../in','input.txt')) as f:\n inpFile = f.readlines()\n \n\n # Model settings\n model = gSettings[\"Model\"]\n inpFile[13] = \"insgrav: {:1d}\\n\".format(int(model[\"NS gravity\"][\"Flag\"]))\n inpFile[14] = \"isun: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Sun\"]))\n inpFile[15] = \"imoon: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Moon\"]))\n\n if model[\"Drag\"][\"Flag\"] == False:\n inpFile[16] = \"idrag: 0\\n\"\n else:\n dm = model[\"Drag\"][\"Model\"].lower()\n if dm == \"wertz\":\n idrag = 1\n elif dm == \"us76\":\n idrag = 2\n elif dm == \"j77\":\n idrag = 3\n elif dm == \"msis00\":\n idrag = 4\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Model\"] + '\" invalid.')\n inpFile[16] = \"idrag: {:1d}\\n\".format(idrag)\n if model[\"Drag\"][\"Solar flux\"].lower() == \"constant\":\n inpFile[17] = \"iF107: 0\\n\"\n elif model[\"Drag\"][\"Solar flux\"].lower() == \"variable\":\n inpFile[17] = \"iF107: 1\\n\"\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Solar flux\"] + '\" invalid.')\n\n if model[\"SRP\"][\"Flag\"] == False:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n else:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n if model[\"SRP\"][\"Eclipses\"]:\n inpFile[18] = \"iSRP: 2\\n\"\n \n if model[\"Lunisolar\"][\"Ephemerides\"] == \"DE431\":\n inpFile[19] = \"iephem: 1\\n\"\n elif model[\"Lunisolar\"][\"Ephemerides\"] == \"Meeus\":\n inpFile[19] = \"iephem: 2\\n\"\n else:\n raise ValueError('Value \"' + model[\"Lunisolar\"][\"Ephemerides\"] + '\" invalid.')\n \n inpFile[20] = \"gdeg: {:3d}\\n\".format(model[\"NS gravity\"][\"Degree\"])\n if model[\"NS gravity\"][\"Order\"] <= model[\"NS gravity\"][\"Degree\"]:\n inpFile[21] = \"gord: {:3d}\\n\".format(model[\"NS gravity\"][\"Order\"])\n else:\n raise ValueError(\"Order {0:d} of the gravity field is greater than degree {1:d}\".format(model[\"NS gravity\"][\"Order\"],model[\"NS gravity\"][\"Degree\"]))\n \n\n\n # Integration settings\n integ = gSettings[\"Integration\"]\n inpFile[29] = \"tol: {:22.15E}\\n\".format(integ[\"Tolerance\"])\n inpFile[30] = \"tspan: {:22.15E}\\n\".format(integ[\"Duration\"] * 365.25)\n inpFile[31] = \"tstep: {:22.15E}\\n\".format(integ[\"Step\"])\n inpFile[39] = \"eqs: {:2d}\\n\".format(integ[\"Equations\"])\n\n\n\n # Output settings\n inpFile[44] = \"verb: 0\\n\"\n inpFile[45] = \"out: \" + os.path.abspath(os.path.join(dirPath, ' '))\n\n\n with open(os.path.join(dirPath,'input.txt'),'w') as f:\n f.writelines(inpFile)", "def create_inputs_recipe():\n module_name, _ = os.path.splitext(os.path.basename(__file__))\n path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n os.makedirs(\"inputs/\", exist_ok=True)\n print('Current working directory:\\n 
{:s}'.format(os.getcwd()))\n\n for filename, _ in input_pars:\n print('Downloading files...')\n basename = filename.split(\"_\")[0] + \".fits\"\n sci_path = download_from_archive(basename)\n sci_ad = astrodata.open(sci_path)\n data_label = sci_ad.data_label()\n\n print('Reducing pre-processed data:')\n logutils.config(file_name='log_{}.txt'.format(data_label))\n p = GNIRSLongslit([sci_ad])\n p.prepare(bad_wcs=\"fix\")\n p.addDQ()\n p.addVAR(read_noise=True)\n p.ADUToElectrons()\n p.addVAR(poisson_noise=True)\n # p.flatCorrect()\n p.makeIRAFCompatible()\n\n os.chdir(\"inputs/\")\n processed_ad = p.writeOutputs().pop()\n os.chdir(\"../\")\n print('Wrote pre-processed file to:\\n'\n ' {:s}'.format(processed_ad.filename))", "def create_input_files(self, datasets_dict):\n ifname = self.keywords['inputfile']\n dirstem = os.path.dirname(ifname)\n basename = os.path.basename(ifname).split('.')[0]\n createdfiles=list()\n if dirstem == \"\":\n dirstem = os.getcwd()\n dkeys = datasets_dict.keys()\n dkeys.sort()\n dct=1\n for didx in dkeys:\n newfile = MASTFile()\n newfile.data = list(datasets_dict[didx])\n newname=\"%s/loop_%s_%s.inp\" % (dirstem, basename, str(dct).zfill(2))\n newfile.to_file(newname)\n #createdfiles.append(os.path.basename(newname))\n createdfiles.append(newname)\n dct=dct+1\n return createdfiles", "def generate(train_data_path, trained_model_path, num_output_files):\n # load the notes used to train the model\n\n train_data = data_preprocess.load_from_pickle(train_data_path)\n training_notes = train_data[\"data\"]\n note_translator = train_data[\"note_translator\"]\n\n net = networks.TransformerNet.load_checkpoint(trained_model_path)\n\n for i in range(num_output_files):\n prediction_output = generate_notes(net, training_notes, note_translator)\n create_midi(prediction_output, file_suffix=i)", "def but_pred(self):\n if not self.nn_obj and not self.path:\n tk.messagebox.showerror(\"Error\", \"Open file and create NN\")\n return\n elif not self.nn_obj:\n tk.messagebox.showerror(\"Error\", \"Create NN\")\n return\n elif not self.path:\n tk.messagebox.showerror(\"Error\", \"Open file first\")\n return\n out=pred(self.nn_obj, self.nn_in)\n if platform == \"linux\" or platform == \"linux2\":\n path=tk.filedialog.asksaveasfilename(filetypes = [('Prediction file','.txt')])\n elif platform == \"win32\":\n path=tk.filedialog.asksaveasfilename(filetypes = [('Prediction file','.txt')], defaultextension=\"*.*\")\n else:\n path=tk.filedialog.asksaveasfilename(filetypes = [('Prediction file','.txt')])\n np.savetxt(path, np.c_[np.array(self.nn_in), out], fmt='%1.3f')", "def generate_inputs_files(dataset_name='mit67', graph=None, one_hot_labels_list=None, bottlenecks=None):\n graph.write_graphmlz(join(DATA_DIR, 'graph.net'))\n indices = [i for i in range(len(one_hot_labels_list))]\n\n y = []\n for one_hot in one_hot_labels_list:\n y.append(one_hot_to_label(one_hot=one_hot))\n\n X = []\n for k, i in enumerate(indices):\n # compare_labels(bottleneck_file=bottlenecks[i], other=y[k])\n # Example: 'Home*winecellar*wine_storage_42_02_altavista.jpg.txt\n if not y[k] == bottlenecks[i].split('*')[0]:\n raise Exception('Feature representation not matching with one-hot representation')\n\n filename = bottlenecks[i].split('*')[1] + '*' +bottlenecks[i].split('*')[2]\n bottlenecks_values = load_bottleneck_values(bottlenecskpath=BOTTLENECK_PATH,\n bottleneck_file=filename)\n for values in bottlenecks_values:\n X.append(values)\n\n allx, tx, ally, ty, allx_indices, X_test_indices = train_test_split(X, y, indices, 
stratify=y,\n test_size=TESTING_PERCENTAGE)\n\n ally = [global_class_to_one_hot(global_class=ally_) for ally_ in ally]\n labels = graph.vs['label']\n verify_labels_order(graph_labels=labels, y_test_labels=ty, x_test_indices=X_test_indices)\n ty = [global_class_to_one_hot(global_class=ty_) for ty_ in ty]\n\n allx_indices = [i for i in range(len(allx_indices))]\n # x e y are samples with labels from training data\n # x_ e y_ are samples with no labels from training data\n x_, x, y_, y, x_train_indices, x_test_indices = train_test_split(allx, ally, allx_indices, stratify=ally,\n test_size=TESTING_PERCENTAGE)\n x = sparse.csr_matrix(x)\n tx = sparse.csr_matrix(tx)\n allx = sparse.csr_matrix(allx)\n y = np.array(y)\n ty = np.array(ty)\n ally = np.array(ally)\n\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.x'), object_=x)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.tx'), object_=tx)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.allx'), object_=allx)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.y'), object_=y)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.ty'), object_=ty)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.ally'), object_=ally)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.test.index'), object_=X_test_indices)\n save_graph_as_dict(graph=graph)", "def generate_files(input_path, output_path):\n profiles = pd.read_csv(f'{input_path}/Profile/Profile.csv')\n\n # call all predictors\n predicted = {f: pr.all_predictors[f].predict(profiles=profiles, base_folder=input_path)\n for f in pr.features if f in pr.all_predictors}\n\n for i, (index, row) in enumerate(profiles.iterrows()):\n file = open(os.path.join(output_path, f'{row[\"userid\"]}.xml'), 'w')\n gender = pr.get_gender(predicted['gender'][row[1]]) if row[1] in predicted['gender'] else predicted['age_group_alternative'][row[1]]\n open_ = predicted['open'][row[1]] if row[1] in predicted['open'] else 'null'\n conscientious = predicted['conscientious'][row[1]] if row[1] in predicted['conscientious'] else 'null'\n extrovert = predicted['extrovert'][row[1]] if row[1] in predicted['extrovert'] else 'null'\n agreeable = predicted['agreeable'][row[1]] if row[1] in predicted['agreeable'] else 'null'\n neurotic = predicted['neurotic'][row[1]] if row[1] in predicted['neurotic'] else 'null'\n age = predicted['age_group'][row[1]] if row[1] in predicted['age_group'] else 'null'\n\n file.write(f'<user id=\"{row[1]}\" ')\n file.write(f'age_group=\"{age}\" ')\n file.write(f'gender=\"{gender}\" ')\n file.write(f'extrovert=\"{extrovert}\" ')\n file.write(f'neurotic=\"{neurotic}\" ')\n file.write(f'agreeable=\"{agreeable}\" ')\n file.write(f'open=\"{open_}\" ')\n file.write(f'conscientious=\"{conscientious}\" />')\n file.close()", "def genInputFile(self, xdata, ydata):\n\n assert xdata.shape == ydata.shape, 'data shapes not the same'\n\n # build the string to save\n s = ' Interface input file\\n'\n\n for key, value in self.params.get(xdata).items():\n if type(value) is dict:\n for k, v in value.items():\n if v is not None:\n s += getParamString(key, k, v)\n\n elif value is not None:\n s += getParamString(key, '', value)\n\n s += \" END\\r\\n\"\n\n # write data\n\n NYstr = \" \" + \"{:6}\".format(\"NY\") + \"{:5}\".format(len(xdata))\n\n s += NYstr + \"\\r\\n\"\n\n data = np.concatenate((xdata, ydata))\n\n for i, c in enumerate(data):\n s += \"{:>11.4E}\".format(c) + '\\r\\n'\n\n with open(self.inputfile, 'w+') as of:\n 
of.write(s)", "def create_file(self):\n dir = os.path.join(str(Path.home()), \".data\")\n if(path.isdir(dir)):\n pass\n else:\n os.mkdir(dir)\n os.chdir(dir)\n\n if(self.lang == \"PYTHON\"):\n file = open(self.id+\".py\", \"w\")\n file.write(self.code)\n file.close()\n elif(self.lang == \"C\"):\n file = open(self.id+\".c\", \"w\")\n file.write(self.code)\n file.close()\n elif(self.lang == 'CPP'):\n file = open(self.id+\".cpp\", \"w\")\n file.write(self.code)\n file.close()\n elif(self.lang == 'JAVA'): \n file = open(self.id+\".java\", \"w\")\n file.write(self.code)\n file.close()\n elif(self.lang==\"JS\"):\n file = open(self.id+\".js\", \"w\")\n file.write(self.code)\n file.close()\n\n file = open(self.id+\"-input.txt\", \"w\")\n file.write(self.input)\n file.close()", "def createInputFile(self):\r\n\r\n input_variables = []\r\n\r\n for variable in self.modelDescription.modelVariables:\r\n if variable.causality == 'input':\r\n input_variables.append(variable)\r\n\r\n if len(input_variables) == 0:\r\n QMessageBox.warning(self,\r\n \"Cannot create input file\",\r\n \"The input file cannot be created because the model has no input variables\")\r\n return\r\n\r\n filename, _ = os.path.splitext(self.filename)\r\n\r\n filename, _ = QFileDialog.getSaveFileName(parent=self,\r\n caption=\"Save Input File\",\r\n directory=filename + '_in.csv',\r\n filter=\"Comma Separated Values (*.csv);;All Files (*.*)\")\r\n\r\n if not filename:\r\n return\r\n\r\n with open(filename, 'w') as f:\r\n\r\n # column names\r\n f.write('\"time\"')\r\n for variable in input_variables:\r\n f.write(',\"%s\"' % variable.name)\r\n f.write('\\n')\r\n\r\n # example data\r\n f.write(','.join(['0'] * (len(input_variables) + 1)) + '\\n')\r\n\r\n self.ui.inputFilenameLineEdit.setText(filename)", "def main():\n filepath = input(\"Enter the Source File: \")\n with open(filepath, encoding=\"utf-8\") as f:\n sentences = f.readlines()\n sentences = \" \".join(sentences)\n\n summary = summarize_sentences(sentences)\n\n filepath_index = filepath.find(\".txt\")\n outputpath = filepath[:filepath_index] + \"_lexRank.txt\"\n\n with open(outputpath, \"w\") as w:\n for sentence in summary:\n w.write(str(sentence) + \"\\n\")", "def generate():", "def main(input_filepath: str = \"./data\",\n output_filepath: str = \"./data\") -> None:\n logger = logging.getLogger(__name__)\n logger.info(\"making final data set from raw data\")\n\n raw_data_dir = path.abspath(input_filepath)\n if path.isdir(raw_data_dir):\n\n processed_data_dir = path.abspath(output_filepath)\n\n logger.info(\"start\")\n filenames = [\"train.txt\", \"valid.txt\", \"test.txt\"]\n create_index(filenames, raw_data_dir, processed_data_dir)\n prepare_datasets(filenames, raw_data_dir, processed_data_dir)\n\n else:\n logger.info(\"File or directory does not exist\")\n\n logger.info(\"finished\")", "def __init__(self, input_file):\r\n self.input_file = input_file\r\n self.no_process = 0\r\n self.ids = []\r\n self.weights = []", "def test_input_target_file(self):\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_target_file\"\n params[\"input_target\"] = \"file\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files = list_files_folder(params[\"input\"], ext=\"fna.gz\")\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), 
\"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")", "def generate_nature(out):\n\n check_path(\n os.path.join(out, BASE),\n f\"The script should automatically generate this file. Something went wrong in the last step.\")\n\n nature = os.path.join(out, NATURE)\n shutil.copytree(\"evaluation/ablation/nature\", nature)\n shutil.copy(\"evaluation/src/utils.h\", nature)", "def _create_generate_input(self):\n self.keep_prob = 1.", "def generate_input_file(temp_type, out_file):\r\n\r\n file_path = os.path.realpath(__file__)\r\n dir_path = os.sep.join(file_path.split(os.sep)[:-1])\r\n\r\n if temp_type == 0:\r\n template = 'Template00_CompleteParameters.py'\r\n elif temp_type == 1:\r\n template = 'Template01_SingleRowCylindricalRollerBearing.py'\r\n elif temp_type == 3:\r\n template = 'Template03_CylindricalRollerThustBearing.py'\r\n elif temp_type == 4:\r\n template = 'Template04_BallOnDisk.py'\r\n elif temp_type == 5:\r\n template = 'Template05_PinOnDisk.py'\r\n elif temp_type == 6:\r\n template = 'Template06_4Ball.py'\r\n elif temp_type == 7:\r\n template = 'Template07_BallOn3Plates.py'\r\n elif temp_type == 8:\r\n template = 'Template08_RingOnRing.py'\r\n else:\r\n raise ValueError(\"temp_type value '{}' undefined\".format(temp_type))\r\n\r\n shutil.copy(os.sep.join([dir_path, 'UserInputTemplates', template]),\r\n out_file)\r\n return out_file", "def template_train_model(task_filename):\n task_filename_only = os.path.basename(task_filename)\n return {\n 'basename': 'generate_tasks',\n 'task_dep': ['generate_job_batch'],\n 'name': task_filename_only,\n #'file_dep': [task_filename], # does not work if mv\n 'targets': ['tests/'+task_filename_only],\n 'actions': [\n 'python pipeline_train_model.py '+task_filename,\n #'rm '+task_filename\n ],\n }", "def write_input(self):\n # load template, substitute parameters and write input file\n self.homog_core()\n self.write_mat_string()\n input_tmpl = open('base_input.txt')\n templ = Template(input_tmpl.read())\n file_string = templ.substitute(cool_frac = self.vfrac_cermet,\n r_core = self.r,\n core_z = self.z,\n r_refl = self.r + self.refl_t,\n refl_min = -self.refl_t,\n refl_max = self.z + self.refl_t,\n fuel_string = self.fuel_string,\n fuel_rho = self.rho,\n fuel_vol = self.core_vol,\n refl_vol = self.core_vol,\n thermal_power = self.Q_therm)\n # write the file\n filename = 'r_{0}_{1}.i'.format(round(self.vfrac_cermet, 3), \n round(self.r, 3))\n ifile = open(filename, 'w')\n ifile.write(file_string)\n ifile.close()\n\n return filename", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()" ]
[ "0.6576268", "0.6575241", "0.65125704", "0.65125704", "0.6435176", "0.6422982", "0.6407261", "0.6396108", "0.6367985", "0.63629234", "0.6240938", "0.6206771", "0.61674213", "0.6161097", "0.61383295", "0.6135501", "0.6114891", "0.60825133", "0.6082016", "0.6017317", "0.6014352", "0.60089856", "0.60066736", "0.5943773", "0.5940146", "0.59360844", "0.59056103", "0.5868209", "0.58428", "0.58278817" ]
0.66078776
0
Converts RGB values to grayscale depending on the type of conversion
def rgb2gray(R,G,B,T):
    gammaCorrection = (1.0/2.2)
    if T == 'intensity':
        grayValue = (R+G+B)/3.0
    elif T == 'gleam':
        grayValue = (R**(gammaCorrection)+G**(gammaCorrection)+B**(gammaCorrection))/3.0
    elif T == 'luminance':
        grayValue = 0.3*R + 0.59*G + 0.11*B
    elif T == 'luma':
        grayValue = 0.2126*R**(gammaCorrection) + 0.7152*G**(gammaCorrection) + 0.0722*B**(gammaCorrection)
    elif T == 'lightness':
        Y = 0.2126*R + 0.7152*G + 0.0722*B
        if Y > ((6.0/29)**3):
            fY = Y**(1.0/3.0)
        else:
            fY = (1.0/3.0)*((29.0/6.0)**2)*Y + (4.0/29.0)
        grayValue = (116.0*fY-16.0)/100.0
    elif T == 'value':
        grayValue = max(R,G,B)
    elif T == 'luster':
        grayValue = (max(R,G,B)+min(R,G,B))/2.0
    else:
        print 'ERROR: Type of gray scale convertion not recognized!'
        sys.exit(1)
    return grayValue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rgb2gray(rgb):\n # this will translate a uint8 array into a float64 one\n grey = np.dot(rgb[..., :3], [0.299, 0.587, 0.114])\n # transform back if the input is a uint8 array\n if rgb.dtype.type is np.uint8:\n grey = round(grey).astype(np.uint8)\n return grey", "def convertImageToGrey(self):\n self.cnvImgTest.convertToGreyscale()", "def greyscale(c):\n return desaturate(c, 1)", "def greyScaleConversion(frame):\n return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)", "def rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])", "def make_grayscale(self):\n \n lum = self.luminance()\n# self.r = lum\n# self.g = lum\n# self.b = lum\n\n # Instead, we can call them method we already defined for setting RGB\n self.set_rgb(lum, lum, lum)", "def convert_grayscale(self):\n return self.image.convert(\"L\")", "def rgb2gray(rgb):\n gray = np.dot(rgb[...,:3], [0.2125, 0.7154, 0.0721])\n return gray.astype('float32')", "def convert_to_grey_scale(image):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n out = color.rgb2gray(image)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def rgb2gray(rgb):\n return np.dot(rgb[..., :3], [0.299, 0.587, 0.144])", "def rgb2gray(data):\n grey = np.expand_dims(np.dot(data, [0.2990, 0.5870, 0.1140]), axis=3)\n return grey", "def convert_to_grayscale(image_rgb):\n red, green, blue = image_rgb[:, :, 0], image_rgb[:, :, 1], image_rgb[:, :, 2]\n image_gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue\n return image_gray", "def rgb2gray(img):\r\n return 0.2989 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2]", "def convert_to_gray_scale(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n\r\n if im.mode != \"L\":\r\n im = im.convert(\"L\")\r\n\r\n return img", "def make_grayscale(img: Image.Image) -> Image.Image:\n orig_mode = img.mode\n\n if orig_mode in [\"RGB\", \"CMYK\", \"YCbCr\", \"LAB\", \"HSV\"]:\n return img.convert(\"L\")\n elif orig_mode == \"RGBA\":\n return img.convert(\"LA\").convert(\"RGBA\")\n elif orig_mode == \"P\":\n # Using ITU-R 601-2 luma transform: L = R * 299/1000 + G * 587/1000 + B * 114/1000\n pal = img.getpalette()\n for i in range(len(pal) // 3):\n # Using ITU-R 601-2 luma transform\n gray = (pal[3 * i] * 299 + pal[3 * i + 1] * 587 + pal[3 * i + 2] * 114)\n gray = gray // 1000\n pal[3 * i: 3 * i + 3] = [gray, gray, gray]\n img.putpalette(pal)\n return img\n else:\n return img", "def getGrayscaleImage(imageRGB):\n return color.rgb2gray(imageRGB)", "def sRGBGrayscale(x):\n rellum=sRGBLuminance(x)\n return [rellum,rellum,rellum]", "def convert_when_colour(colour, img):\n if len(colour) == 3:\n if len(img.shape) == 2:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n elif img.shape[2] == 1:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n return img", "def togray(self,value):\n (red,green,blue) = self.unpack_value(value)\n \n gray = []\n for i in xrange(1024):\n graypx = (0.299*float(red[i]) + 0.587*float(green[i]) +\n 0.114*float(blue[i]))/255.\n gray.append(graypx)\n \n return gray", "def convert_to_grayscale(im_as_arr):\n grayscale_im = np.sum(np.abs(im_as_arr), axis=0)\n im_max = np.percentile(grayscale_im, 99)\n im_min = np.min(grayscale_im)\n grayscale_im = (np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1))\n grayscale_im = np.expand_dims(grayscale_im, axis=0)\n return grayscale_im", "def convert_to_gray(img):\n # split pixel into different b g r colors\n b, 
g, r = cv2.split(img)\n # calculate the gray color\n gray = 0.3 * r + 0.59 * g + 0.11 * b\n return gray", "def rgb2gray(images):\n return np.expand_dims(np.dot(images, [0.2989, 0.5870, 0.1140]), axis=3)", "def rgb2grayscale(image):\r\n\r\n assert image.ndim == 3 and image.shape[2] == 3\r\n\r\n gray_image = np.dot(image, [0.2989, 0.5870, 0.1140]).astype(np.uint8)\r\n\r\n return gray_image", "def greyScale(image):\n\t\n\tflush_message(\"Converting to greyscale...\")\n\tgrey = np.dot(image[...,:3], [0.299, 0.587, 0.144])\n\tprint \"done\"\n\treturn grey", "def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "def to_grayscale(self):\n if len(self.img.shape) in (3, 4):\n gray = opencv.cvtColor(self.img, opencv.COLOR_BGR2GRAY)\n return Image(gray)\n else:\n assert len(self.img.shape) == 2\n return Image(self.img)", "def grayscale(img):\n\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)" ]
[ "0.7124209", "0.70213044", "0.70147705", "0.6998558", "0.69844764", "0.6971335", "0.6954955", "0.6914645", "0.68670905", "0.6861035", "0.6856123", "0.68298274", "0.6808763", "0.6797492", "0.67851967", "0.6773447", "0.6722287", "0.6699727", "0.6688521", "0.66259533", "0.6624553", "0.66204214", "0.65935117", "0.6532751", "0.6526986", "0.6526986", "0.6526986", "0.6526986", "0.65220195", "0.64940494" ]
0.74719244
0
Check if the exchange exists
def check_exchange_exists(self, exchange_name):
    try:
        yield from self.exchange_declare(exchange_name, passive=True)
    except asyncio.exceptions.ChannelClosed:
        return False
    exchange = self.http_client.get_exchange(self.VHOST, exchange_name)
    return exchange is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assertExchangeExists(self, exchange_name):\n exchange = self.http_client.get_exchange(self.VHOST, exchange_name)\n if not exchange or not self.check_exchange_exists(exchange_name):\n self.fail(\"Exchange {} does not exists\".format(exchange_name))", "def test_update_exchange_not_exists(self):\n values = {\"exchange_name\": \"111\", \"api_key\": \"111\", \"secret\": \"111\"}\n ret = self.app.update_exchange(20, values)\n self.assertIn(ret[0], \"error\")", "def test_delete_exchange_not_exists(self):\n ret = self.app.delete_exchange(20)\n self.assertIn(ret[0], \"error\")", "def exchange_checker(self, exchanges):\n self.logger.debug(\"Checking exchanges: '%s'\", exchanges)\n exchanges = exchanges.split(\",\")\n reference = self.get_exchanges()\n for exchange in exchanges:\n if exchange in reference:\n pass\n else:\n raise InvalidExchangeError(\"Invalid exchange: '{}'\".format(exchange))", "def test_add_exchange_empty_api(self):\n exchange_name = \"Testing\"\n api_key = \"\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIn(new_exchange[0], \"error\")", "def setup_exchange(self):\n LOGGER.info('Setting the exchange with name :%s and type :%s',\n self._exchange, self._type)\n if self._channel is None:\n raise ChannelDoesntExist('The channel doesn''t exist')\n\n if len(self._exchange) < 3:\n raise ExchangeNameDoesntMatch('This exchange name does''nt match')\n # Check if the channel doesn't exist on rabbit\n\n list_rabbit_exchange = [] # Correct me\n if self._exchange in list_rabbit_exchange:\n raise ExchangeAlreadyExist('This exchange is already exist')\n\n # Check Me : self._channel.basic_qos(prefetch_count=1)\n self._channel.exchange_declare(exchange=self._exchange,\n type=self._type,\n durable=self._durable,\n auto_delete=self._auto_delete)", "def test_add_exchange_empty_name(self):\n exchange_name = \"\"\n api_key = \"Testing\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertEqual(new_exchange[\"statuscode\"], 0x1001)", "def test_add_exchange(self):\n exchange_name = \"Testing\"\n api_key = \"Testing\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIs(new_exchange.exchange_name, \"Testing\")", "def exists(self, name):\n return self.endpoint.exists(name)", "def is_exchange_information_valid(exchange_info: Dict[str, Any]) -> bool:\n return exchange_info.get(\"status\", None) == \"TRADING\"", "def test_add_exchange_empty_secret(self):\n exchange_name = \"Testing\"\n api_key = \"Testing\"\n secret = \"\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIn(new_exchange[0], \"error\")", "def is_exchange_supported(self, exchange_name):\n try:\n return AlphaVantage._EXCHANGE_SUPPORTED[exchange_name]\n except KeyError:\n return None", "def test_delete_exchange(self):\n new_exchange = self.app.add_exchange(\"test\", \"test\", \"test\")\n ret = self.app.delete_exchange(new_exchange.id)\n self.assertIn(ret[0], \"success\")", "def check_queue_exists(self, queue_name):\n try:\n yield from self.queue_declare(queue_name, passive=True)\n except asyncio.exceptions.ChannelClosed:\n return False\n return True", "def check_exchange(self, config: Dict[str, Any]) -> bool:\n exchange = config.get('exchange', {}).get('name').lower()\n if exchange not in ccxt.exchanges:\n\n exception_msg = f'Exchange \"{exchange}\" not supported.\\n' \\\n f'The following exchanges are supported: {\", 
\".join(ccxt.exchanges)}'\n\n logger.critical(exception_msg)\n raise OperationalException(\n exception_msg\n )\n # Depreciation warning\n if 'ccxt_rate_limit' in config.get('exchange', {}):\n logger.warning(\"`ccxt_rate_limit` has been deprecated in favor of \"\n \"`ccxt_config` and `ccxt_async_config` and will be removed \"\n \"in a future version.\")\n\n logger.debug('Exchange \"%s\" supported', exchange)\n return True", "def supplier_exist(supplier_name: str) -> bool:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from supplier where name = '{}'\".format(supplier_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if len(data) == 0:\n return False\n return True", "def exists(self):\n return True", "def exists(self):\n return True", "async def exists(self, payload: TPayload) -> bool:", "def exist(self):", "def queue_exists(self, queue_name):\n # resp = self._channel.queue_declare(queue_name, passive=True,\n # callback=self._queue_exists_clb)\n try:\n resp = self._channel.queue_declare(queue_name, passive=True)\n except pika.exceptions.ChannelClosedByBroker as exc:\n self.connect()\n if exc.reply_code == 404: # Not Found\n return False\n else:\n self.logger.warning('Queue exists <{}>'.format(queue_name))\n return True", "def getExchange(self):\r\n\t\treturn self.exchange", "def test_update_exchange(self):\n new_exchange = self.app.add_exchange(\"test\", \"test\", \"test\")\n values = {\"exchange_name\": \"111\", \"api_key\": \"111\", \"secret\": \"111\"}\n ret = self.app.update_exchange(new_exchange.id, values)\n self.assertIn(ret[0], \"success\")", "def _has_endpoint(self, endpoint):\n return self.endpoints.filter(pk=endpoint.pk).exists()", "def query_exchange(self, exchange_name, alt_exchange_name=None):\n return self._query(exchange_name, \"exchange\", \"org.apache.qpid.broker\", alt_exchange_name)", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def test_update_exchange_value_empty(self):\n new_exchange = self.app.add_exchange(\"test\", \"test\", \"test\")\n values = {\"exchange_name\": \"\", \"api_key\": \"111\", \"secret\": \"111\"}\n ret = self.app.update_exchange(new_exchange.id, values)\n self.assertIn(ret[0], \"error\")", "def getExchange(self):\r\n\t\treturn self.pair.exchange", "def exists(self, proxy):\n return not self.database.zscore(self.key, proxy) == None", "def _query(self, name, _class, package, alt_exchange_name=None):\n try:\n obj_list = self.__session.getObjects(_class=_class, _package=package)\n found = False\n for obj in obj_list:\n if obj.name == name:\n found = True\n if alt_exchange_name != None:\n alt_exch_list = self.__session.getObjects(_objectId=obj.altExchange)\n if len(alt_exch_list) == 0 or alt_exch_list[0].name != alt_exchange_name:\n return False\n break\n return found\n except Exception:\n return False" ]
[ "0.76463896", "0.6932534", "0.6805306", "0.651784", "0.6458291", "0.6433818", "0.63781905", "0.62697124", "0.62050086", "0.60667396", "0.60465", "0.6019676", "0.5993576", "0.59078103", "0.581015", "0.5700562", "0.5681127", "0.5681127", "0.5673045", "0.564337", "0.5592218", "0.55616915", "0.5534623", "0.5485594", "0.54629123", "0.54532826", "0.5425518", "0.5423662", "0.54121566", "0.5409258" ]
0.81995
0
Check if the exchange exists
def assertExchangeExists(self, exchange_name):
    exchange = self.http_client.get_exchange(self.VHOST, exchange_name)
    if not exchange or not self.check_exchange_exists(exchange_name):
        self.fail("Exchange {} does not exists".format(exchange_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_exchange_exists(self, exchange_name):\n try:\n yield from self.exchange_declare(exchange_name, passive=True)\n except asyncio.exceptions.ChannelClosed:\n return False\n exchange = self.http_client.get_exchange(self.VHOST, exchange_name)\n return exchange is not None", "def test_update_exchange_not_exists(self):\n values = {\"exchange_name\": \"111\", \"api_key\": \"111\", \"secret\": \"111\"}\n ret = self.app.update_exchange(20, values)\n self.assertIn(ret[0], \"error\")", "def test_delete_exchange_not_exists(self):\n ret = self.app.delete_exchange(20)\n self.assertIn(ret[0], \"error\")", "def exchange_checker(self, exchanges):\n self.logger.debug(\"Checking exchanges: '%s'\", exchanges)\n exchanges = exchanges.split(\",\")\n reference = self.get_exchanges()\n for exchange in exchanges:\n if exchange in reference:\n pass\n else:\n raise InvalidExchangeError(\"Invalid exchange: '{}'\".format(exchange))", "def exists(self, name):\n return self.endpoint.exists(name)", "def setup_exchange(self):\n LOGGER.info('Setting the exchange with name :%s and type :%s',\n self._exchange, self._type)\n if self._channel is None:\n raise ChannelDoesntExist('The channel doesn''t exist')\n\n if len(self._exchange) < 3:\n raise ExchangeNameDoesntMatch('This exchange name does''nt match')\n # Check if the channel doesn't exist on rabbit\n\n list_rabbit_exchange = [] # Correct me\n if self._exchange in list_rabbit_exchange:\n raise ExchangeAlreadyExist('This exchange is already exist')\n\n # Check Me : self._channel.basic_qos(prefetch_count=1)\n self._channel.exchange_declare(exchange=self._exchange,\n type=self._type,\n durable=self._durable,\n auto_delete=self._auto_delete)", "def test_add_exchange_empty_api(self):\n exchange_name = \"Testing\"\n api_key = \"\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIn(new_exchange[0], \"error\")", "def test_add_exchange_empty_name(self):\n exchange_name = \"\"\n api_key = \"Testing\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertEqual(new_exchange[\"statuscode\"], 0x1001)", "def test_add_exchange(self):\n exchange_name = \"Testing\"\n api_key = \"Testing\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIs(new_exchange.exchange_name, \"Testing\")", "def check_queue_exists(self, queue_name):\n try:\n yield from self.queue_declare(queue_name, passive=True)\n except asyncio.exceptions.ChannelClosed:\n return False\n return True", "def is_exchange_supported(self, exchange_name):\n try:\n return AlphaVantage._EXCHANGE_SUPPORTED[exchange_name]\n except KeyError:\n return None", "def test_add_exchange_empty_secret(self):\n exchange_name = \"Testing\"\n api_key = \"Testing\"\n secret = \"\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIn(new_exchange[0], \"error\")", "def is_exchange_information_valid(exchange_info: Dict[str, Any]) -> bool:\n return exchange_info.get(\"status\", None) == \"TRADING\"", "def test_delete_exchange(self):\n new_exchange = self.app.add_exchange(\"test\", \"test\", \"test\")\n ret = self.app.delete_exchange(new_exchange.id)\n self.assertIn(ret[0], \"success\")", "def queue_exists(self, queue_name):\n # resp = self._channel.queue_declare(queue_name, passive=True,\n # callback=self._queue_exists_clb)\n try:\n resp = self._channel.queue_declare(queue_name, passive=True)\n except pika.exceptions.ChannelClosedByBroker as 
exc:\n self.connect()\n if exc.reply_code == 404: # Not Found\n return False\n else:\n self.logger.warning('Queue exists <{}>'.format(queue_name))\n return True", "def check_exchange(self, config: Dict[str, Any]) -> bool:\n exchange = config.get('exchange', {}).get('name').lower()\n if exchange not in ccxt.exchanges:\n\n exception_msg = f'Exchange \"{exchange}\" not supported.\\n' \\\n f'The following exchanges are supported: {\", \".join(ccxt.exchanges)}'\n\n logger.critical(exception_msg)\n raise OperationalException(\n exception_msg\n )\n # Depreciation warning\n if 'ccxt_rate_limit' in config.get('exchange', {}):\n logger.warning(\"`ccxt_rate_limit` has been deprecated in favor of \"\n \"`ccxt_config` and `ccxt_async_config` and will be removed \"\n \"in a future version.\")\n\n logger.debug('Exchange \"%s\" supported', exchange)\n return True", "async def exists(self, payload: TPayload) -> bool:", "def exists(self):\n return True", "def exists(self):\n return True", "def _has_endpoint(self, endpoint):\n return self.endpoints.filter(pk=endpoint.pk).exists()", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def supplier_exist(supplier_name: str) -> bool:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from supplier where name = '{}'\".format(supplier_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if len(data) == 0:\n return False\n return True", "def query_exchange(self, exchange_name, alt_exchange_name=None):\n return self._query(exchange_name, \"exchange\", \"org.apache.qpid.broker\", alt_exchange_name)", "def exists(self):\r\n return bool(self.bucket.lookup(self.name))", "def getExchange(self):\r\n\t\treturn self.exchange", "def check_subscription_name_exists(self):\n return check_subscription_name_exists(self.project_id, self.subscription_name)", "def exists(self, proxy):\n return not self.database.zscore(self.key, proxy) == None", "def stack_exists(self, stack):\n try:\n self.cfn.describe_stacks(StackName=stack)\n return True\n except botocore.exceptions.ClientError as e:\n if \"does not exist\" in e.response['Error']['Message']:\n return False\n else:\n raise e", "async def _exists(self, key):\n with await self._connect() as redis:\n exists = await redis.exists(key)\n return True if exists > 0 else False", "def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:\n exchanges_list = self.connected_exchanges.get(location)\n if exchanges_list is None:\n return None\n\n for exchange in exchanges_list:\n if exchange.name == name:\n return exchange\n\n return None" ]
[ "0.8322733", "0.67963177", "0.6719261", "0.6502801", "0.63569736", "0.6326286", "0.6242829", "0.6210772", "0.61285925", "0.604688", "0.6040342", "0.59260064", "0.5919058", "0.59123266", "0.57819825", "0.57609856", "0.56974137", "0.5673107", "0.5673107", "0.5597733", "0.5594339", "0.5573102", "0.55332536", "0.5492469", "0.5470285", "0.5437237", "0.5431789", "0.542912", "0.54106367", "0.5401084" ]
0.777473
1
Check if the queue exists
def check_queue_exists(self, queue_name):
    try:
        yield from self.queue_declare(queue_name, passive=True)
    except asyncio.exceptions.ChannelClosed:
        return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_queue(self):\n return (os.path.exists(self._queue_path) and\n os.path.getsize(self._queue_path) > 0)", "def queue_exists(self, queue_name):\n # resp = self._channel.queue_declare(queue_name, passive=True,\n # callback=self._queue_exists_clb)\n try:\n resp = self._channel.queue_declare(queue_name, passive=True)\n except pika.exceptions.ChannelClosedByBroker as exc:\n self.connect()\n if exc.reply_code == 404: # Not Found\n return False\n else:\n self.logger.warning('Queue exists <{}>'.format(queue_name))\n return True", "def queue_exists(name: str) -> bool:\n try:\n batch = aws.client_with_default_region(\"batch\")\n\n return bool(\n batch.describe_job_queues(jobQueues = [name]) \\\n .get(\"jobQueues\"))\n except:\n return False", "def is_alive(self):\n try:\n stdout, stderr = self.run(0, \"rabbitmqctl\", \"list_queues\")\n for lines in stdout, stderr:\n for line in lines:\n if \"no_exists\" in line:\n return False\n return True\n except Exception:\n return False", "def queue_empty(self, queue_name):\n return self.queue_message_count(queue_name) == 0", "def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False", "def test_dequeue_empty(self):\n \n r = self.store.dequeue('/queue/nonexist')\n assert r is None\n \n assert self.store.has_frames('/queue/nonexist') == False\n assert self.store.size('/queue/nonexist') == 0", "def _chk_empty(self, queue, receiver):\n try:\n msg = receiver.fetch(timeout=0)\n self.assert_(False, \"Queue \\\"%s\\\" not empty: found message: %s\" % (queue, msg))\n except Empty:\n pass", "def is_queued(self):\n qstat = self._grep_qstat('queued')\n if qstat:\n return True\n return False", "def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )", "def any(self) -> bool:\n return len(self.queue) > 0", "def is_empty(self):\n return self.queue == []", "def test_queue_class_exists():\n assert Queue", "def request_already_queued(self, request: str):\n try:\n self.create_request_queue_if_not_exists()\n queue = []\n db = self.get_db_safely()\n cursor = db.cursor()\n cursor.execute(\n \"\"\"SELECT rowid FROM queue WHERE request = ?\"\"\",\n (request,))\n for row in cursor:\n queue.append(row)\n if len(queue) == 0:\n return False\n else:\n return True\n except sqlite3.Error:\n # This is a lie, but we don't want to try and enqueue something if we got an error here.\n return True", "def is_empty(self):\n return len(self.__queue) > 0", "def isEmpty(self):\n\t\tself.logger.debug('Check if queue job is empty')\n\t\tisEmpty = self.queue.empty()\n\t\tself.logger.debug('Queue job is empty ?: %s'%(isEmpty))\n\t\treturn isEmpty", "def test_is_empty(self):\n queue = Queue()\n self.assertEqual(queue.is_empty(), True)\n queue.enqueue(1)\n self.assertEqual(queue.is_empty(), False)", "def is_empty(self):\n return len(self.queue) == 0", "def is_empty(self):\n return 
len(self.queue) == 0", "def empty_queue(queue):\n return queue.front is None", "def is_empty(self):\n return not self._queue", "def isEmpty(self):\r\n if (len(self.queue) >= 1):\r\n return False\r\n else:\r\n return True", "def pop_queue(self, queue=None):\n if not queue:\n return False\n \n cur = self.conn.cursor()\n cur.execute(\"LOCK TABLE \" + queue + \" IN ACCESS EXCLUSIVE MODE;\")\n\n cur.execute(\"SELECT id FROM \" + queue + \" LIMIT 1;\")\n row = cur.fetchone()\n self.conn.commit()\n \n if row:\n cur.execute(\"DELETE FROM \" + queue + \" WHERE id='\"+str(row[0])+\"';\")\n return row[0]\n else:\n return False", "def check_ack_queue(self):\r\n try:\r\n while True:\r\n ack = self.ack_queue.get_nowait()\r\n self.handle_ack(ack)\r\n except queue.Empty:\r\n pass", "def empty(self):\r\n return self.queue == []", "def load_queue(self, queue=None):\n if not queue:\n return False\n elif queue == \"ready_queue\":\n table = \"tangerine\"\n condition = \" WHERE state='ready';\"\n elif queue == \"job_queue\":\n table = \"jobs\"\n condition = \"\"\n else:\n table = \"tangerine\"\n condition = \"\"\n \n cur = self.conn.cursor()\n cur.execute(\"LOCK TABLE \" + queue + \" IN ACCESS EXCLUSIVE MODE;\")\n \n cur.execute(\"SELECT COUNT(id) FROM \" + queue + \";\")\n\n # if the queue still has tasks return nothing\n if cur.fetchone()[0]:\n self.conn.rollback()\n else:\n cur.execute(\"SELECT COUNT(id) FROM \" + table + condition + \";\")\n\n # If the task table is empty return nothing\n if not cur.fetchone()[0]:\n self.conn.commit()\n return\n \n cur.execute(\"SELECT id FROM \" + table + condition + \";\")\n ids = (\"(\" + str(id[0]) + \")\" for id in cur.fetchall())\n cur.execute(\"INSERT INTO \" + queue + \" VALUES \" + \", \".join(ids) + \";\")\n self.conn.commit()", "def is_empty(self):\n return len(self.the_queue) == 0", "def empty(self) -> bool: \n if(self.queue is not None and len(self.queue) > 0):\n print(\"len > 0\" )\n return False\n else:\n print(\"len = 0\" )\n return True", "def test_queue_not_installed(): # pragma: windows\n nt.assert_equal(IPCComm.get_queue(), None)", "def empty(self):\n return not self.queue1" ]
[ "0.8026836", "0.78004205", "0.77506423", "0.73082227", "0.717858", "0.7174335", "0.7098522", "0.7097279", "0.703504", "0.69714975", "0.6923398", "0.6896911", "0.68656427", "0.68311435", "0.6799334", "0.6783198", "0.67765915", "0.67744863", "0.67744863", "0.6768916", "0.6742154", "0.6741202", "0.6732314", "0.671623", "0.67103535", "0.670797", "0.6685945", "0.6676098", "0.66738176", "0.6644042" ]
0.8107538
0
Delete the exchange, but do not raise any exception if it fails. The operation has a timeout as well.
def safe_exchange_delete(self, exchange_name, channel=None):
    channel = channel or self.channel
    full_exchange_name = self.full_name(exchange_name)
    try:
        yield from channel.exchange_delete(full_exchange_name, no_wait=False, timeout=1.0)
    except asyncio.TimeoutError:
        logger.warning('Timeout on exchange %s deletion', full_exchange_name, exc_info=True)
    except Exception:
        logger.error('Unexpected error on exchange %s deletion', full_exchange_name, exc_info=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_exchange(self):\n new_exchange = self.app.add_exchange(\"test\", \"test\", \"test\")\n ret = self.app.delete_exchange(new_exchange.id)\n self.assertIn(ret[0], \"success\")", "def test_delete_exchange_not_exists(self):\n ret = self.app.delete_exchange(20)\n self.assertIn(ret[0], \"error\")", "def delete_exchange(self, exchange_id):\n if exchange_id:\n self.exch_repo.delete(exchange_id=exchange_id)\n else:\n raise Exception(\"No exchange_id found for deleting exchange.\")", "def _remove_exchange(self, message):\n key = (message.remote, message.mid)\n\n if key not in self._active_exchanges:\n # Before turning this up to a warning, consider https://github.com/chrysn/aiocoap/issues/288\n self.log.info(\"Received %s from %s, but could not match it to a running exchange.\", message.mtype, message.remote)\n return\n\n messageerror_monitor, next_retransmission = self._active_exchanges.pop(key)\n next_retransmission.cancel()\n if message.mtype is RST:\n messageerror_monitor()\n self.log.debug(\"Exchange removed, message ID: %d.\", message.mid)\n\n self._continue_backlog(message.remote)", "async def send_delete(self, timeout, message, *args, **kwargs):\n msg = await self.send(message, *args, **kwargs)\n await self.delay(timeout, self.delete_message, msg)", "def safe_queue_delete(self, queue_name, channel=None):\n channel = channel or self.channel\n full_queue_name = self.full_name(queue_name)\n try:\n yield from channel.queue_delete(full_queue_name, no_wait=False, timeout=1.0)\n except asyncio.TimeoutError:\n logger.warning('Timeout on queue %s deletion', full_queue_name, exc_info=True)\n except Exception:\n logger.error('Unexpected error on queue %s deletion', full_queue_name, exc_info=True)", "async def delete(self, delete: TPayload) -> None:", "def do_delete_request(self, uri, headers, timeout_ms):\n return self._do_request('DELETE', uri, headers, None, timeout_ms, None)", "def _cleanup_method(self, queue_name, ep=None):\n if ep._chan is not None and not ep._chan._queue_auto_delete:\n # only need to delete if AMQP didn't handle it for us already!\n # @TODO this will not work with XOs (future)\n try:\n ch = self.container.node.channel(RecvChannel)\n ch._recv_name = NameTrio(get_sys_name(), \"%s.%s\" % (get_sys_name(), queue_name))\n ch._destroy_queue()\n except TransportError as ex:\n log.warn(\"Cleanup method triggered an error, ignoring: %s\", ex)", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None", "def abort(self):\n if self._handle.closed:\n return\n elif self._protocol is None:\n raise TransportError('transport not started')\n self._handle.close(self._on_close_complete)\n assert self._handle.closed", "def delete(self):\n self.call('DELETE', expect=error.NO_CONTENT)", "def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:\n if self.get_exchange(name=name, location=location) is None:\n return False, f'{location!s} exchange {name} is not registered'\n\n exchanges_list = self.connected_exchanges.get(location)\n if exchanges_list is None:\n return False, f'{location!s} exchange {name} is not registered'\n\n if len(exchanges_list) == 1: # if is last exchange of this location\n self.connected_exchanges.pop(location)\n else:\n self.connected_exchanges[location] = [x for x in exchanges_list if x.name != 
name]\n with self.database.user_write() as write_cursor: # Also remove it from the db\n self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501\n self.database.delete_used_query_range_for_exchange(\n write_cursor=write_cursor,\n location=location,\n exchange_name=name,\n )\n return True, ''", "def _queue_delete(self, queue):\n\n queue.delete()", "def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')", "async def test_endpoint_delete_share_interface_error(self):\n with self.patch_handle_dropped_connection:\n with self.assertRaises(aiohttp.web.HTTPServiceUnavailable):\n await delete_share_handler(self.mock_request)\n self.handle_dropped_connection_mock.assert_called_once()", "def test_issue_delete_subscription(self):\n pass", "def _cancel_ack_timeout(self):\n if self._ack_handle.active():\n self._ack_handle.cancel()", "def delete(self, endpoint, ttl=5000, **kwargs):\n return self.__api_call('DELETE', endpoint, kwargs, ttl, True)", "def delete(api, url, payload=None, headers=None, auth=_KERBEROS_AUTH,\n proxies=None, retries=_NUM_OF_RETRIES, timeout=None):\n return call(api, url, 'delete', payload=payload,\n headers=headers, auth=auth, proxies=proxies, retries=retries,\n timeout=timeout)", "def test_delete_subscription(self):\n pass", "def delete(self, hostname):\n self.not_supported()", "def destroy(self):\n\n raise imap4.MailboxException(\"Permission denied.\")", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.delete(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def pop():\n task = connection.zrange(QUEUE_KEY, 0, 0)\n if not task:\n return False, 'No emails now!'\n msg_id = task[0]\n timestamp = connection.zscore(QUEUE_KEY, msg_id)\n now = datetime.datetime.now().timestamp()\n if timestamp < now or abs(timestamp - now) <= 1e-6:\n message = connection.get(msg_id)\n pipeline = connection.pipeline()\n pipeline.zrem(QUEUE_KEY, msg_id)\n pipeline.delete(msg_id)\n pipeline.execute()\n return True, message\n return False, \"It's too early now!\"", "def delete(self, **ctx_options):\n return self.delete_async(**ctx_options).get_result()", "def delete(self, key, dead_time=0):\n\n\t\tcheck_key(key)\n\t\tserver, key = yield self._get_server_for(key)\n\n\t\tif not server:\n\t\t\traise StopIteration(False)\n\n\t\tif dead_time is None:\n\t\t\tdead_time = 0\n\n\t\ttry:\n\t\t\tyield server.sendall(\"delete %s %d\\r\\n\" % (key, dead_time))\n\n\t\t\tres = yield server.read_line()\n\t\t\tif res != \"DELETED\":\n\t\t\t\tself._debuglog(\"expected 'DELETED', got %r\" % (res, ))\n\t\t\t\traise StopIteration(False)\n\n\t\t\traise StopIteration(True)\n\n\t\texcept tcp.ConnectionClosedException:\n\t\t\tserver.mark_dead()", "def delete(self, path):\n raise imap4.MailboxException(\"Permission denied.\")", "async def test_delete(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Set schedule to be interval based\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'deletetest'\n interval_schedule.process_name = \"sleep1\"\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await 
scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(5)\n\n # Delete a scheduled task\n await scheduler.delete_schedule(interval_schedule.schedule_id)\n\n # Assert that process was deleted\n try:\n await scheduler.delete_schedule(interval_schedule.schedule_id)\n assert False\n except ScheduleNotFoundError:\n pass\n\n await self.stop_scheduler(scheduler)", "def _delete(self, uri, headers=None):\n if self.openam_url[-1:] == '/':\n openam_path = self.openam_url + uri\n else:\n openam_path = self.openam_url + \"/\" + uri\n\n try:\n data = requests.delete(openam_path, headers=headers, timeout=self.timeout, verify=self.verify)\n except requests.exceptions.RequestException as e:\n data = {'error': e}\n return data" ]
[ "0.7276048", "0.71246606", "0.6664925", "0.607659", "0.5633418", "0.563197", "0.5606057", "0.55958897", "0.5558147", "0.55456984", "0.55141497", "0.54579496", "0.5420487", "0.53386253", "0.53213537", "0.531655", "0.52993995", "0.52779245", "0.52039284", "0.51991016", "0.51729065", "0.51591337", "0.51471454", "0.5128447", "0.51154774", "0.5107359", "0.5107041", "0.5083128", "0.50803924", "0.5072017" ]
0.75520235
0
This test will fail if there is no step function deployed on AWS
def test_stepfunction_exists2():
    sf = boto3.client('stepfunctions')
    res = sf.list_state_machines()
    sfn_name = res['stateMachines'][0]['name']  # some existing step function
    assert Job.stepfunction_exists(sfn_name) is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_execute_deployment(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_get_deployment_run(self):\n pass", "def TestOneStep(self):\n pass", "def test_aws_provisioner(host):\n\n assert True", "def test_no_such_step(self):\n with self.assertRaises(Exception):\n self.run_step('FAKE-STEP.no-exists')", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_create_deployment(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_get_scenario(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_get_scenarios(self):\n pass", "def lambda_handler(Event, Context):\n if 'StateMachineArn' in Event.keys():\n step_function_arn = Event['StateMachineArn']\n r = step_function_client.start_execution(\n stateMachineArn=step_function_arn,\n input=json.dumps({\"last_updated\": \"\"}))\n\n else:\n stepfunctions = [os.getenv(\"CHARGEBEEDOWNLOADARN\"), os.getenv(\"EXCHANGERATESDOWNLOADARN\")]\n\n for stepfunction in stepfunctions:\n step_function_arn = stepfunction\n r = step_function_client.start_execution(\n stateMachineArn=step_function_arn,\n input=json.dumps({\"last_updated\": \"\"}))", "def test_create_scenario(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_create_scenario1(self):\n pass", "def test_functions_created():\n check_function_exists(task_19_4, \"send_commands_to_devices\")", "def test_update_deployment(self):\n pass", "def test_install(self):\n pass", "def step_impl(context):\n pass", "def step_impl(context):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def test_deploy_workflow_definition(self):\n pass", "def run(self, steps_function, api):\n raise NotImplementedError", "def test_delete_deployment_run(self):\n pass", "def test_invoke_skip_plugin(mock_boto3_client, mock_boto3_resource):\n from odl_datalake_ingestion import lambda_handler\n mock_context = MockContext()\n mock_event[\"Records\"][0][\"s3\"][\"object\"][\"key\"] = \"servicedesk/customer/ca_sdm/tb_call_req/2018-07-02/call_req.csv\"\n mock_boto3_client.return_value.head_object.return_value = {\n \"ResponseMetadata\": {\n \"HTTPHeaders\": {\n \"content-length\": 1024,\n \"content-type\": \"text/plain\",\n \"last-modified\": \"Sun, 1 Jan 2006 12:00:00 GMT\"\n }\n }\n }\n lambda_handler(mock_event, mock_context)" ]
[ "0.68600166", "0.6702098", "0.656663", "0.65146595", "0.6469141", "0.6326048", "0.63145554", "0.63145554", "0.63077724", "0.62984747", "0.6284911", "0.62324506", "0.6199514", "0.61387306", "0.6086996", "0.6077555", "0.6043144", "0.59923697", "0.5979849", "0.5977083", "0.59760696", "0.59294754", "0.5928568", "0.5928568", "0.5902653", "0.58838314", "0.58597076", "0.5853689", "0.58212316", "0.5796424" ]
0.7515801
0
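The record above shows only the test, not the implementation it exercises. Below is a minimal sketch of what an existence check like the one the test relies on could look like; the standalone `stepfunction_exists` helper is hypothetical (it is not taken from the project under test) and running it assumes valid AWS credentials and permission to call ListStateMachines.

import boto3


def stepfunction_exists(name: str) -> bool:
    """Hypothetical helper: True if a Step Functions state machine named `name` exists."""
    client = boto3.client("stepfunctions")
    paginator = client.get_paginator("list_state_machines")
    for page in paginator.paginate():
        if any(machine["name"] == name for machine in page["stateMachines"]):
            return True
    return False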
Raise NotAuthenticated exception if not authenticated.
def check_authentication(self, request):
    if not self.request.user.is_authenticated:
        raise NotAuthenticated()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _authenticated_or_die(self):\n if not self._authenticated:\n raise Exception('The client is not authenticated!')", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def auth_error():\n return unauthorized('Invalid credentials')", "def authenticate(request):\n if not current_user.is_authenticated:\n raise NoAuthProvided()\n if current_user.is_locked or not current_user.active:\n raise UnauthorizedError(\n 'Authentication failed for <User '\n f'username=`{current_user.username}`>. '\n 'Wrong credentials or locked account')\n return current_user", "def authenticate(self):\n resp = Response(None, 401)\n abort(401, description='Please provide proper credentials', response=resp)", "def _is_authenticated(self, request):\n # Authenticate the request as needed.\n auth_result = self._meta.authentication.is_authenticated(request)\n\n if isinstance(auth_result, HttpResponse):\n raise ImmediateHttpResponse(response=auth_result)\n\n if not auth_result is True:\n raise ImmediateHttpResponse(response=http.HttpUnauthorized())", "def unauthorized():\n return HttpError(401)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def get_authenticated_denied(self):", "def test_is_unauthenticated(self):\n response = self.post_question()\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def _handle_authentication_error(self):\n response = make_response('Access Denied')\n response.headers['WWW-Authenticate'] = self.auth.get_authenticate_header()\n response.status_code = 401\n return response", "def authenticate():\n return abort(401)", "def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)", "def handle_exception(self, e):\n\t\t# if isinstance(exc, (exc.NotAuthenticated, exc.AuthenticationFailed)):\n\t\t# \t# WWW-Authenticate header for 401 responses, else coerce to 403\n\t\t# \tauth_header = self.get_authenticate_header(self.request)\n\n\t\t# \tif auth_header:\n\t\t# \t\texc.auth_header = auth_header\n\t\t# \telse:\n\t\t#\t\texc.status_code = status.HTTP_403_FORBIDDEN\n\t\tself.raise_uncaught_exception(e)", "def test_authenticate_no_credentials(self):\n \n self.assertRaises(\n ValueError, \n self.authenticator.authenticate\n )", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def is_authenticated(self):\n return False", "def test_not_authenticated_uri(self):\n request = self.factory.get(self.uri)\n response = 
self.view(request)\n response.render()\n self.assertEqual(response.status_code, 401,\n 'Expected Response Code 401, received {0} instead.'\n .format(response.status_code))", "def check_authentication():\r\n\r\n #TODO: Reservation based authentication\r\n try:\r\n authenticated_user()\r\n except Exception as e:\r\n return e\r\n\r\n return True", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def test_call_unauthenticated(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(401)\n with self.assertRaises(APIError):\n data = client.call(**self.build_parameters)", "def error_on_unauthorized():\n\n username = get_jwt_identity()\n user = Login.query.filter_by(username=username).first()\n\n if user is None:\n raise APIError(400, \"User {username} does not exist on this server\".format(username=username))\n elif user.role is not Role.admin:\n raise APIError(401, \"Only administrators have access to this page\")", "def authenticate(self):\n abort(\n 401,\n description=self.exception,\n www_authenticate=(\"WWW-Authenticate\", 'Basic realm=\"%s\"' % __package__),\n )", "def token_auth_error():\n logger.debug(\"Token authentication failed.\")\n return unauthorized(\"Invalid credentials.\")" ]
[ "0.81067973", "0.7579304", "0.7579304", "0.68916255", "0.6863911", "0.6831236", "0.67285824", "0.67270446", "0.67251", "0.67042917", "0.6684563", "0.6644959", "0.661338", "0.6590485", "0.6588753", "0.6570428", "0.6554263", "0.652594", "0.652594", "0.652594", "0.64416474", "0.64374423", "0.64360934", "0.6417142", "0.6389476", "0.63838303", "0.6363835", "0.6259482", "0.6245948", "0.62442446" ]
0.7580139
1
Return whether this is a getting action.
def is_get(self):
    return self.action in ["list", "retrieve"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __bool__(self):\n return bool(self._actions)", "def match_action(self, action):\n\n return hasattr(self, self._action_handler_name(action))", "def is_action(self) -> bool:\n return self.is_action_str(self.content)", "def action_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action_required\")", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def _use_get(self) -> bool:\n\n strategy = self._lazy_strategy\n return strategy.use_get", "def helper_action_get_request_is_wrong(self, action_name):\n wrong = not util.safe_string_compare(action_name, self.last_request_get_dict[\"action\"][0])\n return wrong", "def poll(cls, context):\r\n return context.object.animation_data.action is not None", "def probe_action(self) -> Optional[pulumi.Input[Union['ExecActionArgs', 'HTTPGetActionArgs', 'TCPSocketActionArgs']]]:\n return pulumi.get(self, \"probe_action\")", "def is_getter(self):\n return self._is_getter", "def actions_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"actions_required\")", "def actions_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"actions_required\")", "def actions_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"actions_required\")", "def actions_required(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"actions_required\")", "def requested() -> bool:\n\treturn _flag.is_set()", "def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")", "def obtain_action(self):\r\n\t\treturn", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "def is_on(self):\n return bool(getattr(self.resource, self.variable))", "def actions_required(self) -> Optional[str]:\n return pulumi.get(self, \"actions_required\")", "def _get_action(self):\n return self.__action", "def is_github_actions():\n return \"GITHUB_ACTIONS\" in os.environ and os.environ[\"GITHUB_ACTIONS\"] == \"true\"", "def has(self, ActionClass):\n for action in self.h:\n if isinstance(action, ActionClass):\n return True\n return False", "def get_action(self):\n return self.__action", "def get(self):\n self.get_or_post(method='GET')", "def is_update_active(self):\n return self._update_action", "def getAction(self, state):\n util.raiseNotDefined()", "def getAction(self, state):\n util.raiseNotDefined()" ]
[ "0.66465306", "0.6514637", "0.63705033", "0.61986583", "0.61492366", "0.611826", "0.6019523", "0.6010379", "0.6007403", "0.60029685", "0.5971159", "0.5971159", "0.5971159", "0.5971159", "0.59573185", "0.58928037", "0.58693737", "0.58690596", "0.58690596", "0.58690596", "0.5834591", "0.5832836", "0.5815887", "0.5757986", "0.56764054", "0.5668177", "0.565198", "0.5642074", "0.56285805", "0.56285805" ]
0.85034436
0
Return whether this is an updating action.
def is_update(self):
    return self.action in ["update", "partial_update"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_update_active(self):\n return self._update_action", "def isConfirmedDataUp(self):\n return self.mhdr.mtype == CO_DATA_UP", "def isUnconfirmedDataUp(self):\n return self.mhdr.mtype == UN_DATA_UP", "def isUp ( self ) :\n return not self.isDown()", "def __bool__(self):\n return bool(self._actions)", "def can_make_action(self) -> bool:\n return not(self.has_pending_action or self.is_dead())", "def is_up(self):\n \n return self.is_level('up')", "def is_on(self):\n if self.is_update_locked():\n return self.graceful_state\n if self._state['action'] == 1 and self._state['state'] == 2:\n return True\n return False", "def oneupped(self):\n\n oneup = self.oneups.filter_by(author_id=session[\"active_persona\"]).first()\n\n if oneup is None or oneup.state < 0:\n return False\n else:\n return True", "def IsWiredUp(self):\n return self.wired.IsUp()", "def canSwipeUp (self) :\n return(self.canSwipeBase())", "def is_up_to_date(self) -> bool:\n if self._uptodate is None:\n return True\n return self._uptodate()", "def is_action(self) -> bool:\n return self.is_action_str(self.content)", "def is_up(self):\n data = self.vxprint()\n return self.name in data and data[self.name].STATE == \"ACTIVE\"", "def is_migrated(self) -> bool:\n return pulumi.get(self, \"is_migrated\")", "def is_on(self) -> bool:\n return self.event.is_tripped", "def can_update_order_items(self) -> bool:\n return self.is_created or self.is_pending", "def can_mark_as_undone(self):\n if (not self.archived) and self.event_store.done:\n return True\n return False", "def isup(sourcename) :\n return s.isUp(sourcename)", "def is_update_active_no_pause(self):\n return self._update_action_without_pause", "def should_update(self):\n if self.last_updated is None:\n return True\n\n now = dt_util.utcnow()\n update_due_at = self.last_updated.replace(tzinfo=pytz.UTC) + datetime.timedelta(minutes=2)\n return now > update_due_at", "def is_action_applied(instance: Algorithm) -> bool:\n if len(get_results(instance)) == 0:\n return False\n return True", "def isUpdateExpected(self):\n return self.__isUpdateExpected", "def needs_update(self) -> bool:\n return False", "def replied(self):\n return bool(self.replied_at is not None)", "def isTransmitted(self) -> bool:\r\n\r\n return self.__is_transmitted", "def _can_do_updates(self):\n return True", "def is_up(self):\n return True", "def is_up(self):\n return True", "def pull_up(self):\n return False" ]
[ "0.65594274", "0.6467985", "0.63728833", "0.63440377", "0.61977506", "0.61768883", "0.6129319", "0.6117427", "0.6098021", "0.6055959", "0.60226405", "0.60025406", "0.5969736", "0.5941043", "0.5921024", "0.5878445", "0.58667946", "0.58482164", "0.58346134", "0.58339983", "0.5779585", "0.5743217", "0.57302827", "0.5718122", "0.5705738", "0.56984043", "0.5667196", "0.565263", "0.565263", "0.56485915" ]
0.6551283
1
Filter queryset to users based on permissions.
def get_queryset(self, *args, **kwargs):
    qs = super().get_queryset(*args, **kwargs)
    user = self.request.user
    if not user.is_authenticated:
        return qs.none()
    if self.is_get and not user.has_perm("users.view_user"):
        return qs.filter(pk=user.pk)
    if self.is_update and not user.has_perm("users.change_user"):
        return qs.filter(pk=user.pk)
    return qs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_queryset(self, request, queryset, view):\n if view.action == \"retrieve\" and request.method == \"GET\":\n return queryset.model.objects.all()\n\n filtered_queryset = super().filter_queryset(request, queryset, view)\n org_users = set(\n [group.team.organization for group in request.user.groups.all()] +\n [o.user for o in filtered_queryset]\n )\n\n return queryset.model.objects.filter(user__in=org_users, user__is_active=True)", "def filter_granted(self, queryset):\n granted_runs = ContainerRun.filter_by_user(self.request.user)\n\n return queryset.filter(run_id__in=granted_runs)", "def get_queryset(self, *args, **kwargs):\n qs = super().get_queryset(*args, **kwargs).filter(user=self.request.user)\n return qs", "def filter_queryset(self, request, queryset, view):\n\n if view.action == \"list\":\n # Return widgets from xform user has perms to\n return self._xform_filter_queryset(request, queryset, view, \"object_id\")\n\n return super().filter_queryset(request, queryset, view)", "def filter_queryset(self, queryset):\n user = self.request.user\n if user.is_superuser:\n return super().filter_queryset(queryset)\n return queryset.filter(collaborators=user)", "def filter_queryset(self, request, queryset, view):\n if view.action == \"list\":\n users = request.GET.get(\"users\")\n if users:\n users = users.split(\",\")\n return queryset.filter(user__username__in=users)\n if not request.user.is_anonymous:\n return queryset.filter(user__username=request.user.username)\n\n return queryset.none()\n\n return queryset", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.filter(user=request.user)", "def get_queryset(self):\n return super().get_queryset().filter(user=self.request.user)", "def filter_granted(self, queryset):\n granted_containers = Container.filter_by_user(self.request.user)\n\n return queryset.filter(app__container_id__in=granted_containers)", "def filter_queryset(self, request, queryset, view):\n owner = request.query_params.get(\"owner\")\n\n if owner:\n kwargs = {self.owner_prefix + \"__username__iexact\": owner}\n\n return queryset.filter(**kwargs)\n\n return queryset", "def filter_granted(self, queryset):\n granted_containers = Container.filter_by_user(self.request.user)\n\n return queryset.filter(container_id__in=granted_containers)", "def filter_queryset(self, request, queryset, view):\n if str(request.query_params.get(\"orgs\")).lower() == \"false\":\n organization_user_ids = OrganizationProfile.objects.values_list(\n \"user__id\", flat=True\n )\n queryset = queryset.exclude(id__in=organization_user_ids)\n\n return queryset", "def get_queryset(self):\n filter_kwargs = {self.bound_to_user_field_name: self.request.auth.user}\n return super(BoundToUserMixin, self).get_queryset().filter(**filter_kwargs)", "def get_queryset(self):\n queryset = super(BaseViewSet, self).get_queryset()\n user = self.request.user\n return queryset.filter(user=user)", "def get_queryset(self):\n queryset = User.objects.all()\n if self.action == 'list':\n return queryset.filter(is_active=True)\n return queryset", "def queryset(self):\n User = get_user_model()\n return User.objects", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(created_by=request.user)", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(created_by=request.user)", "def query_filters_restricted (self) :\n user = 
self.user_restriction\n if user is not None :\n return Q.created_by == user", "def granted_users(self):\n return [\n u\n for u in User.objects.filter(is_active=True)\n if ManagedObject.objects.filter(UserAccess.Q(u) & Q(id=self.id)).exists()\n ]", "def apply_authorization_limits(self, request, object_list):\n return object_list.filter(user=request.user)", "def queryset(self, request):\n qs = super(ShortURLAdmin, self).queryset(request)\n if request.user.has_perm('deflect.list_all'):\n return qs\n return qs.filter(creator=request.user)", "def users_with_role(self):\r\n entries = User.objects.filter(\r\n courseaccessrole__role=self._role_name,\r\n courseaccessrole__org=self.org,\r\n courseaccessrole__course_id=self.course_key\r\n )\r\n return entries", "def get_queryset(self):\n qs = super(RetiresmartzViewSet, self).get_queryset()\n # Check user object permissions\n user = SupportRequest.target_user(self.request)\n return qs.filter_by_user(user)", "def get_queryset(self):\n return get_user_model().objects.none()", "def get_queryset(self):\n return filterUsersByName( self.request.query_params.get('username', None) )", "def filter_queryset(self, request, queryset, view):\n if request and request.user.is_anonymous:\n return queryset.filter(shared=True)\n\n return queryset", "def get_queryset(self):\n user_requested = self.kwargs['user']\n self.check_object_permissions(self.request, user_requested)\n return Poll.objects.filter(created_by__username=user_requested)", "def filter_queryset(self, request, queryset, view):\n if request.user.is_anonymous:\n return queryset.filter(Q(shared_data=True))\n return queryset", "def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs" ]
[ "0.7223497", "0.70795846", "0.7036792", "0.6972066", "0.6936853", "0.6928938", "0.6916886", "0.6859023", "0.6803199", "0.67962486", "0.6774986", "0.6733345", "0.6714067", "0.6703698", "0.66519463", "0.66389406", "0.65361154", "0.65361154", "0.6525295", "0.65201217", "0.65171826", "0.6489933", "0.6472953", "0.6453229", "0.6440424", "0.6429478", "0.6426218", "0.6422239", "0.6406567", "0.6399077" ]
0.72607553
0
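The three documents above (`is_get`, `is_update`, `get_queryset`) read like pieces of a single Django REST Framework viewset. A minimal sketch of an assumed host class that ties them together is below; the class name, the `ModelViewSet` base, and the use of `@property` are assumptions inferred from how `self.is_get` is referenced without parentheses, and the snippet only runs inside a configured Django/DRF project.

from rest_framework import viewsets


class UserViewSet(viewsets.ModelViewSet):  # hypothetical host class for the snippets above
    @property
    def is_get(self):
        return self.action in ["list", "retrieve"]

    @property
    def is_update(self):
        return self.action in ["update", "partial_update"]

    def get_queryset(self, *args, **kwargs):
        qs = super().get_queryset(*args, **kwargs)
        user = self.request.user
        if not user.is_authenticated:
            return qs.none()  # anonymous users see nothing
        if self.is_get and not user.has_perm("users.view_user"):
            return qs.filter(pk=user.pk)  # read access limited to own record
        if self.is_update and not user.has_perm("users.change_user"):
            return qs.filter(pk=user.pk)  # write access limited to own record
        return qs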
Detect obstacles by deploying a Canny filter on the depth_colormap. Prerequisites
def detect_obstacle(depth_image, color_image, depth_colormap, depth_scale=0.001):
    height, width = int(color_image.shape[0]), int(color_image.shape[1])
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_position = int(height/10), int(width/10)

    # We will be removing the background of objects more than
    # clipping_distance_in_meters meters away
    clipping_distance_in_meters = 5
    clipping_distance = 5 / depth_scale
    threshold = 0.1 / depth_scale

    # First, remove the background
    bg_removed = remove_background(
        depth_image, depth_colormap, clipping_distance_in_meters)

    # Deploy the Canny filter
    cannied = cv2.Canny(bg_removed, 20, 100)

    # Cut off the line at the border
    cannied_bool = np.logical_and(
        cannied == 255, depth_image < (clipping_distance-threshold))
    new_cannied = np.zeros((height, width), dtype=np.uint8)
    new_cannied[cannied_bool] = 255

    # Perform morphological operations
    kernel = np.ones((21, 21), np.uint8)
    img = cv2.dilate(new_cannied, kernel, 1)
    img = cv2.erode(img, kernel, 1)

    # Find Contours
    contours, _ = cv2.findContours(
        img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)

    # Filter out contours that have 0 depth-value
    filtered_contours = []
    for c in contours:
        unpack = list(tuple(reversed(item)) for sublist in c for item in sublist)
        filtering = list(filter(lambda x: depth_image[x] > 0, unpack))
        filtered_contours.append(filtering)

    # Blank image for draw contours.
    contour_img = np.zeros(
        (new_cannied.shape[0], new_cannied.shape[1], 3), dtype=np.uint8)

    # Determine the closest obstacle and its distance to the robot
    distance = []
    for c in filtered_contours:
        # Determine the most extreme points along the contour
        all_distance = list(map(lambda x: depth_image[x], c))
        if all_distance:
            distance.append(mean(all_distance))
    if filtered_contours and distance:
        cv2.putText(contour_img, str(min(distance)), text_position, font,
                    1, (255, 255, 255), 1, cv2.LINE_AA)

    # Approximate contours to polygons
    contours_poly = [None]*len(contours)
    for i, c in enumerate(contours):
        contours_poly[i] = cv2.approxPolyDP(c, 3, True)

    # Draw polygonal contours
    for i in range(len(contours_poly)):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv2.drawContours(contour_img, contours_poly, i, color)

    # stacked = np.hstack((color_image, depth_colormap, contour_img))

    # Return the contours image and the list of contours
    return contour_img, filtered_contours
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vis_detections(color_image, depth_colormap, class_col, dets_col, thresh=0.5):\n\n for cls_ind, class_name in enumerate(class_col):\n dets = dets_col[cls_ind]\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n continue\n\n for i in inds:\n bbox = [int(e) for e in dets[i, :4]]\n score = dets[i, -1]\n \n cv2.rectangle(color_image, (bbox[0], bbox[1]),\n (bbox[2], bbox[3]), (0, 0, 255), 3)\n cv2.rectangle(depth_colormap, (bbox[0], bbox[1]),\n (bbox[2], bbox[3]), (0, 0, 255), 3)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n color_image = cv2.putText(color_image, '{:s} {:.3f}'.format(class_name, score),\n (bbox[0], max(bbox[1] - 2, 1)), font, 0.5, (255, 255, 255), 2)\n depth_colormap = cv2.putText(depth_colormap, '{:s} {:.3f}'.format(class_name, score),\n (bbox[0], max(bbox[1] - 2, 1)), font, 0.5, (255, 255, 255), 2)\n \n # Stack both images horizontally\n images = np.hstack((color_image, depth_colormap))\n\n # Show images\n cv2.imshow('RealSense', images)", "def demo(sess, net, color_image, depth_colormap):\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, color_image)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n CONF_THRESH = 0.7\n NMS_THRESH = 0.3\n dets_col = []\n cls_col = []\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n dets_col.append(dets)\n cls_col.append(cls)\n\n vis_detections(color_image, depth_colormap, cls_col, dets_col, thresh=CONF_THRESH)\n\n depth_col, bbox_col = calc_histogram(depth_image, cls_col, dets_col, thresh=CONF_THRESH)\n print(\"box depth:\", depth_col[0], \"sucker depth:\", depth_col[1])\n print(\"box bbox:\", bbox_col[0], \"sucker bbox\", bbox_col[1])", "def hysterisis_canny(img,weak):\n\n m,n = img.shape\n img_copy = np.copy(img)\n vis = [[False for j in range(n)] for i in range(m)]\n\n for i,j in it.product(range(m),range(n)):\n if img_copy[i,j] == 255:\n dfs_canny(i,j,img_copy,vis,weak,m,n)\n\n img_copy[img_copy < 255] = 0\n\n return img_copy", "def exec_canny(img_copy):\n edges = cv2.Canny(img_copy, 100, 200, L2gradient=True)\n return edges", "def get_obstacles(image):\n\n ih, iw = image.shape[:2]\n image_copy = image.copy()\n\n #resize the image to the size of arena\n image = cv2.resize(image, ARENA_SIZE, interpolation=cv2.INTER_CUBIC)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n #replace all black pixels to white pixels\n gray[np.where(gray == 0)]= 255\n\n #get the thresholded binary image\n ret,threshold = cv2.threshold(gray,200,255,cv2.THRESH_BINARY_INV)\n\n #find all the countours in the binary image\n _, contours, heiarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n cont = []\n\n #create a mask to draw contours on\n blocks = mask = np.zeros(threshold.shape[:2], np.uint8)\n\n #create a dictionary to hold image roi of all puzzle peices\n blocks_roi = {}\n\n #iterate through all contours\n for i, c in enumerate(contours[1:]):\n\n #find the minimum area fitting rectangle of the contour\n rect = cv2.minAreaRect(c)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n #create the copy of the mask\n mask_copy = mask.copy()\n\n #draw the rectangle on the mask\n 
cv2.drawContours(mask_copy, [box], -1, (255,255,255), 3)\n\n #floodfill the rectangle\n cv2.floodFill(mask_copy, None, (0,0), 255)\n mask_inv = cv2.bitwise_not(mask_copy)\n blocks = cv2.add(blocks, mask_inv)\n\n _, contours, heiarchy = cv2.findContours(blocks, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n obstacles = {}\n\n for c in contours:\n x,y,w,h = cv2.boundingRect(c)\n obstacles.update({(int(x+w/2), int(y+h/2)): BLOCK_SIZE})\n #obstacles.update({(int(x+w/2), int(y+h/2)): (w, h)}) # for unknown block sizes\n bottom_r = remap((x+w, y+h), ARENA_SIZE, (iw,ih))\n top_l = remap((x, y), ARENA_SIZE, (iw,ih))\n blocks_roi.update({(int(x+w/2), int(y+h/2)): image_copy[top_l[1]:bottom_r[1], top_l[0]:bottom_r[0]]})\n\n return obstacles, blocks_roi", "def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False", "def detect_object(world):\n # create the map with only the obstucale to non-zero\n world_hsv = cv2.cvtColor(world, cv2.COLOR_BGR2HSV)\n mask_red = cv2.inRange(world_hsv, low_red, up_red)\n occupancy_grid = np.array(mask_red)\n world_rows, world_cols, _ = world.shape\n\n # create the mask in order to find the goal\n world_hsv = cv2.cvtColor(world, cv2.COLOR_BGR2HSV)\n mask_goal = cv2.inRange(world_hsv, low_blue, up_blue)\n goal_x, goal_y = (15, 15) # goal by default\n\n # look for the obstacle and increase there size\n for i in range(world_rows):\n for j in range(world_cols):\n occupancy_grid[i][j] = int(occupancy_grid[i][j] / 255)\n if mask_goal[i][j] > 200:\n goal_x, goal_y = (i, j)\n object_grid = [[goal_x, goal_y]]\n return object_grid, occupancy_grid", "def get_objects(color, depth, threshold1, threshold2):\n\n gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n surf = cv2.xfeatures2d.SURF_create(500)\n\n # find and draw the keypoints\n kp = surf.detect(blur,None)\n\n pts = [p.pt for p in kp]\n xpts = []\n ypts = []\n\n # evaluate the keypoints and only save the keypoints who are between the given threshold\n depth_values = []\n for i in range(0,len(pts)):\n xco = int(pts[i][0])\n yco = int(pts[i][1])\n depth_value = depth[yco][xco]\n if depth_value >= float(threshold1) and depth_value <= float(threshold2):\n xpts.append(xco)\n ypts.append(yco)\n depth_values.append(depth_value)\n\n # make histogram of x coordinates of the saved 
keypoints\n n, distr, _ = plt.hist(xpts)\n plt.savefig('hist.png')\n\n # evaluate the histogram and make seperate arrays for the different objects\n objectarray = []\n temp = []\n for i in range(len(n)):\n if n[i] > 0:\n temp.append(distr[i])\n temp.append(distr[i+1])\n else:\n if len(temp)!=0:\n objectarray.append(temp)\n temp = []\n objectarray.append(temp)\n\n objects = []\n\n # determine the objects with the previous calculated arrays\n for i in range(len(objectarray)):\n y_values = []\n min_x = int(np.amin(objectarray[i]))\n max_x = int(np.amax(objectarray[i]))\n\n for j in range(len(xpts)):\n if xpts[j] > min_x and xpts[j] < max_x:\n y_values.append(ypts[j])\n\n min_y = int(np.amin(y_values))\n max_y = int(np.amax(y_values))\n x = min_x\n y = min_y\n w = max_x - min_x\n h = max_y - min_y\n\n depth_mean = round(get_depth_mean(depth, x, y, w, h), 3)\n\n object = DetectedObject(x, y, w, h, depth_mean)\n objects.append(object)\n\n return objects", "def get_obstacles_map(obstacles, placed_pecies):\n \n #create a mask image to draw the obstacles on\n blocks = np.zeros(ARENA_SIZE[::-1], np.uint8)\n\n #get the grid points where the robot needs to placed\n grid = get_grid(ARENA_SIZE)\n\n #draw the obstacles and their safety region on the map\n for i in obstacles.keys():\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(obstacles[i][0]/4), i[1]-int(obstacles[i][1]/4)), (i[0]+int(obstacles[i][0]/4), i[1]+int(obstacles[i][1]/4)), 255, -1)\n\n #draw the obstacles and their safety region on the map\n for i in placed_pecies.keys():\n try:\n if not i == grid[5]:\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n else:\n cv2.rectangle(blocks, (int(i[0]-7.4*placed_pecies[i][0]/4), int(i[1]-7.4*placed_pecies[i][1]/4)),\n (int(i[0]+7.4*placed_pecies[i][0]/4), int(i[1]+7.4*placed_pecies[i][1]/4)), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(placed_pecies[i][0]/4), i[1]-int(placed_pecies[i][1]/4)), (i[0]+int(placed_pecies[i][0]/4), i[1]+int(placed_pecies[i][1]/4)), 255, -1)\n except Exception as e:\n print(e)\n\n return cv2.bitwise_not(blocks)", "def find_gate_posts(img, display_results=False):\n\n greyscale_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_GRAY2BGR)\n cm_image = cv2.applyColorMap(greyscale_image, cv2.COLORMAP_VIRIDIS)\n\n kernel = np.ones((5, 5), np.uint8)\n\n # cm_image = cv2.erode(cm_image, kernel, iterations=1)\n kernel = np.ones((5, 5), np.uint8)\n cm_image = cv2.dilate(cm_image, kernel, iterations=3)\n kernel = np.ones((4, 4), np.uint8)\n cm_image = cv2.erode(cm_image, kernel, iterations=1)\n\n cm_image = cv2.medianBlur(cm_image, 5) # Removes salt and pepper noise\n\n cm_copy_image = cm_image\n cv2.copyTo(cm_image, cm_copy_image)\n\n mask = mask_sonar_image(cm_image, display_results)\n\n cm_circles = cv2.findContours(mask, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n cm_circles = list(filter(lambda x: (cv2.contourArea(x) > 200\n and cv2.contourArea(x) < 5000),\n cm_circles))\n cm_circles = sorted(cm_circles,\n key=lambda x: (arc_circ(x)),\n reverse=False)\n\n cm_circles = list(filter(lambda x: (cv2.arcLength(x, True)**2/(4\n * math.pi*cv2.contourArea(x)) > 2.5), cm_circles))\n\n if len(cm_circles) < 1:\n print(\"Not enough circles found\")\n return None\n\n filtered_circles = cm_circles[0:1]\n\n circle_positions = []\n for circle in filtered_circles: # find center of circle code\n M = cv2.moments(circle)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n 
circle_positions.append((cX, cY, arc_circ(circle), cv2.arcLength(\n circle, True)**2/(4*math.pi*cv2.contourArea(circle))))\n\n if display_results:\n cv2.drawContours(cm_copy_image, filtered_circles, -1, (0, 255, 0), 2)\n cv2.imshow(\"found_gate_posts\", cm_copy_image)\n cv2.waitKey(0)\n\n return circle_positions", "def detect(image):\n markers = []\n # Stage 1: Detect edges in image\n gray = cvtColor(image, COLOR_BGR2GRAY)\n clahe = createCLAHE(clipLimit=1, tileGridSize=(6, 6))\n cl1 = clahe.apply(gray)\n _, thresh = threshold(cl1, 60, 255, THRESH_OTSU)\n blurred = GaussianBlur(thresh, (5, 5), 0)\n edges = Canny(blurred, 75, 100)\n\n # Stage 2: Find contours\n contours = findContours(edges, RETR_TREE, CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=contourArea, reverse=True)[:]\n\n for contour in contours:\n # Stage 3: Shape check\n perimeter = arcLength(contour, True)\n approx = approxPolyDP(contour, 0.01*perimeter, True)\n\n if len(approx) == QUADRILATERAL_POINTS:\n area = contourArea(approx)\n # (x, y, w, h) = boundingRect(approx)\n # ar = float(h) / float(w)\n # if area > 100 and ar >= 0.8 and ar <= 1.2:\n if area > 700:\n # putText(image, str(area), (10, 30), FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n drawContours(image, [contour], -1, (0, 255, 0), 1)\n\n # Stage 4: Perspective warping\n topdown_quad = get_topdown_quad(thresh, approx.reshape(4, 2))\n\n # Stage 5: Border check\n if topdown_quad[int((topdown_quad.shape[0]/100.0)*5), int((topdown_quad.shape[1]/100.0)*5)] > BLACK_THRESHOLD:\n continue\n\n # Stage 6: Get marker pattern\n marker_pattern = None\n\n try:\n marker_pattern = get_marker_pattern(topdown_quad, THRESHOLD_PERCENT)\n except:\n continue\n\n if not marker_pattern:\n continue\n\n # Stage 7: Match marker pattern\n marker_found, marker_rotation, marker_name = match_marker_pattern(marker_pattern)\n\n if marker_found:\n markers.append([marker_name, marker_rotation])\n\n return markers, image", "def test_thresh_color(images):\n for img in images:\n # Get the stack bounds to draw onto the main image\n stack_bounds = get_stack_bounds(img)\n\n # Get all the sub-images for each stack\n stack_images = get_stack_images(img)\n\n SIZE = (200, 300)\n filtered_imgs = []\n\n # Loop through all the stacks\n for stack_bound, stack_img in zip(stack_bounds, stack_images):\n #Draw the rectangle for the current stack\n disp = deepcopy(img)\n located_stacks_img = draw_rect(np.copy(disp), stack_bound, [0,0,0])\n cv2.imshow('Filtering stack', located_stacks_img)\n\n # Convert the current stack image into hsv\n stack_img_hsv = cv2.cvtColor(stack_img, cv2.COLOR_BGR2HSV)\n for i, color in enumerate(COLORS):\n contours = thresh_color(stack_img, stack_img_hsv, COLORS[color])\n\n # Draw the contours\n stack2 = deepcopy( stack_img)\n cont_img = cv2.drawContours(stack2, contours, -1, (255,255,255), 2)\n # cont_img = cv2.resize(cont_img, SIZE)\n\n # Put the number of contours as text\n txt = '{}:{}'.format(color, len(contours))\n print(txt)\n\n # Display the contour information to the screen\n cv2.imshow(txt, scale_image(cont_img, 9))\n filtered_imgs.append(cont_img)\n cv2.moveWindow(txt, 180*i, 600)\n # cv2.imshow('filtered_images', np.hstack(filtered_imgs))\n print()\n # Skip to the next image\n if cv2.waitKey(0) == ord('1'):\n break\n cv2.destroyAllWindows()", "def applyCanny( self, img):\n img = togray( img )\n res = cv2.Canny( img ,480,500)\n return res", "def dfs_canny(i,j,img,vis,weak,m,n):\n if i >= 0 and i < m and j >= 0 and j < n and vis[i][j] == False and img[i,j] != 0:\n\n if 
img[i,j] == weak:\n img[i,j] = 255\n\n vis[i][j] = True\n\n dfs_canny(i,j-1,img,vis,weak,m,n)\n dfs_canny(i,j+1,img,vis,weak,m,n)\n dfs_canny(i-1,j,img,vis,weak,m,n)\n dfs_canny(i+1,j,img,vis,weak,m,n)\n dfs_canny(i-1,j-1,img,vis,weak,m,n)\n dfs_canny(i-1,j+1,img,vis,weak,m,n)\n dfs_canny(i+1,j-1,img,vis,weak,m,n)\n dfs_canny(i+1,j+1,img,vis,weak,m,n)", "def vision(image):\n vis_map = resize(image, alpha, beta)\n print(\"Resized map from the blue mask\")\n\n world = rotate(vis_map)\n\n plt.figure()\n plt.imshow(world[:, :, ::-1])\n plt.show()\n object_grid, occupancy_grid = detect_object(world)\n print(\"Result of the red mask\")\n plt.figure()\n plt.imshow(occupancy_grid)\n plt.show()\n return object_grid, occupancy_grid, world", "def detect_bridge():\n # Initialize color ranges for detection\n color_range = [Color(\"Brug\", [0, 0, 0], [0, 255, 107]),\n Color(\"Gat\", [0, 0, 0], [0, 0, 255]),\n Color(\"Rand\", [0, 0, 185], [0, 0, 255]),\n Color(\"White-ish\", [0, 0, 68], [180, 98, 255])]\n\n cam = Recognize(color_range)\n cam.run()", "def detect(self, img: np.ndarray) -> np.ndarray:\n filtered = self.filter(img)\n\n # Detect edges\n edges = cv2.Canny(np.uint8(filtered * 255), self.canny_lo, self.canny_hi)\n\n # Obtain the gradients for filtering.\n # We only require local gradients, so obtaining them only when required would make sense.\n dx = cv2.Scharr(filtered, cv2.CV_32F, 1, 0)\n dy = cv2.Scharr(filtered, cv2.CV_32F, 0, 1)\n\n # Now the stroke width transform already detects lo-hi-lo edges for us, but it is an extremely slow\n # implementation I did.\n if self._stroke_filter:\n gradients = (dx, dy)\n for y in range(0, edges.shape[0]):\n for x in range(0, edges.shape[1]):\n if edges[y, x] == 0:\n continue\n ray = swt_process_pixel((x, y), edges, gradients, min_length=5, max_length=20)\n if ray is None:\n edges[y, x] = 0\n else:\n edges = non_line_suppression(filtered, edges, dx, dy)\n\n if self._morphological_filtering:\n edges = cv2.morphologyEx(edges, cv2.MORPH_BLACKHAT, np.ones(shape=(5, 5)))\n edges = cv2.medianBlur(edges, 3)\n\n if self.detect_lines:\n lines = cv2.HoughLinesP(edges, 1, np.pi / 90, self.hough_line_support,\n minLineLength=self.hough_line_length, maxLineGap=self.hough_line_gap)\n edge_lines = np.zeros_like(edges)\n if lines is None:\n return edge_lines\n for line in lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(edge_lines, (x1, y1), (x2, y2), 255, 2)\n else:\n edge_lines = edges\n\n return edge_lines", "def process_depth_image(self, data):\n try:\n depth_image = self.bridge.imgmsg_to_cv2(data)\n\n # reset min_val to big value\n self.min_val = 100\n\n # find min dist, and note the x for that location\n def find_min_val(depth_image):\n for x in range(0, LAST_ROW):\n counter = 0\n for y in depth_image[x]:\n counter += 1\n if y < self.min_val:\n self.obstacle_x = counter\n self.min_val = y\n if self.min_val <= MIN_THRESHOLD:\n return\n\n find_min_val(depth_image)\n\n self.dist = -1\n self.median = -1\n\n bounding_rect = depth_image[self.y:(self.y + self.h), self.x:(self.x+self.w)]\n median_depth = np.nanmedian(bounding_rect)\n\n # set dist to median of bounding box, if box is not 0 in area, and if not a box of NaNs (otherwise self.dist = -1)\n if len(bounding_rect) > 0 and math.isnan(median_depth) == False:\n self.dist = self.median = median_depth\n\n except CvBridgeError, e:\n rospy.loginfo(e)", "def get_classification_simulator(self, image):\n\n r_channel = image[:,:,2]\n g_channel = image[:,:,1]\n\n\n\n # Threshold color channel\n s_rgy_min = 50\n s_thresh_min = 245\n 
s_thresh_max = 255\n \n #s_binary = np.zeros_like(r_channel)\n r_binary = np.zeros_like(r_channel)\n g_binary = np.zeros_like(r_channel)\n y_binary = np.zeros_like(r_channel)\n \n #s_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) | ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n \n r_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & (g_channel <= s_rgy_min)] = 1\n g_binary[((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max)) & (r_channel <= s_rgy_min)] = 1\n y_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n\n #res = cv2.bitwise_and(img,img,mask = s_binary)\n \n #maxx=image.shape[1]\n maxy=image.shape[0]\n \n y_top=0\n window_size_y=50\n y_bottom=y_top+window_size_y\n \n max_color=0\n tf_color=TrafficLight.UNKNOWN\n \n while (y_bottom< maxy):\n #print(img[y_top:y_bottom,:,:])\n rs= r_binary[y_top:y_bottom,:].sum()\n gs= g_binary[y_top:y_bottom,:].sum()\n ys= y_binary[y_top:y_bottom,:].sum()\n if (rs>max_color):\n max_color=rs\n tf_color=TrafficLight.RED\n if (gs>max_color):\n max_color=gs\n tf_color=TrafficLight.GREEN\n if (ys>max_color):\n max_color=ys\n tf_color=TrafficLight.YELLOW\n y_top+=window_size_y\n y_bottom+=window_size_y\n \n if (max_color<100):\n tf_color=TrafficLight.UNKNOWN\n \n\n\n return tf_color", "def canny (im,t1,t2,gamma):\n\tim2 = im.copy()\n\tedges = cv2.GaussianBlur(im2, (15,15),gamma)\n\tedges = cv2.normalize(edges,edges,0,255,cv2.NORM_MINMAX)\n\tedges = cv2.Canny(np.uint8(edges),t1,t2)\n\treturn edges", "def edge_detect_canny(gray_img):\n\n edges = cv2.Canny(gray_img, 100, 200)\n\n return edges", "def run_canny_edge_detection(image, min_threshold=100, max_threshold=150):\n return cv2.Canny(image, min_threshold, max_threshold)", "def detectBlocksInDepthImage(self):\n depth_range_dict = {'1':[173,178],'2':[169,172],'3':[165,169],'4':[159,163],'5':[156,158],'6':[147,155],'7':[139,146],'8':[132,138]}\n depth_frame = self.DepthFrameRaw\n rgb_frame = self.VideoFrame\n rgb_frame = cv2.resize(rgb_frame, (640,480))\n depth_frame = cv2.resize(depth_frame, (640, 480))\n np.clip(depth_frame,0,2**10 - 1,depth_frame)\n depth_frame >>= 2\n depth_frame = depth_frame.astype(np.uint8)\n filt_block = []\n for k,v in depth_range_dict.items():\n thresh = cv2.inRange(depth_frame,v[0],v[1])\n cv2.imwrite(\"/home/student/armlab-w20/log/img.jpeg\", thresh)\n _ , contours, _ = cv2.findContours(thresh, 1, 2)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 400 and area < 700:\n block = []\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n color = self.determine_color(rgb_frame, box)\n org = (box[0][0], box[0][1])\n rgb_frame = cv2.putText(rgb_frame, color, org,cv2.FONT_HERSHEY_SIMPLEX , 0.5 ,(0,0,0),2, cv2.LINE_AA)\n rgb_frame = cv2.drawContours(rgb_frame,[box],0,(0,0,0),0)\n self.VideoFrame = rgb_frame\n block.append(box)\n block.append(int(k))\n block.append(color)\n filt_block.append(block)\n return filt_block", "def find_roads(\n probability_map,\n *,\n input_threshold=0.3,\n max_roads=None,\n min_strength=0.17, #0.2,\n num_angles=720,\n roads_min_angle=np.pi/8,\n roads_min_distance=50,\n debugimage=None, # for debugging ...\n debugprint=None): # for debugging ...\n\n # shorthand\n im = probability_map\n\n # the angles to be used in the Hough transform\n theta = np.linspace(-np.pi/2, np.pi/2, num_angles)\n\n # normalize almost anything to grayscale\n if im.ndim == 3:\n if im.shape[2] == 
4:\n im = im[:,:,:3] # throw away alpha\n im = im.mean(axis=2) # convert RGB to grayscale\n\n if debugimage: debugimage('original', im, 0, 1, 'jet')\n\n assert im.ndim == 2\n\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n\n # create monochrome/binary input map\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n\n if debugimage: debugimage('threshold_applied', im)\n\n # Hough transform\n hspace, angles, distances = hough_line(im, theta)\n\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max() # normalize\n\n if debugimage: debugimage('hough_hspace', hspace)\n\n # convolution filters, rectangular, tuned for widths of 12, 32, 48 pixels\n w12 = np.concatenate([-np.ones((6)), np.ones((12)), -np.ones((6))])\n w32 = np.concatenate([-np.ones((16)), np.ones((32)), -np.ones((16))])\n w48 = np.concatenate([-np.ones((24)), np.ones((48)), -np.ones((24))])\n\n # convolve\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n\n # normalize signal strengths for different road widths\n im12 /= 12\n im32 /= 32\n im48 /= 48\n\n ca = (None, None, 'jet',)\n if debugimage: debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage: debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage: debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined',\n np.hstack([im12, im32, im48]), *ca)\n\n # compute possible roads of all widths, sorted by signal strength\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((\n seq,\n np.tile(np.tile(angles, distances.shape[0]), 3),\n np.tile(np.repeat(distances, angles.shape[0]), 3),\n np.repeat([12, 32, 48], distances.shape[0] * angles.shape[0])\n ))[sor][::-1]\n\n # columns: strength, angle, distance, width\n found_roads = np.asarray([]).reshape(0, 4)\n\n # find as many as strong roads as desired, while dropping roads that are too\n # similar to roads already found (non-max suppression)\n for i in range(roads.shape[0]):\n if roads[i,0] < min_strength:\n break\n a = roads[i,1]\n d = roads[i,2]\n close = (\n np.logical_or(\n np.logical_and(\n np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]-d) < roads_min_distance),\n np.logical_and(\n np.pi - np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]+d) < roads_min_distance)))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n\n return found_roads, im.shape", "def generate_contours(edges, colorimg, img):\n cv2.destroyAllWindows()\n cv2.namedWindow(\"Contours\")\n lengthlimit = 20\n arealimit = 20\n\n # create trackbars for length and area filters\n cv2.createTrackbar(\"length\", \"Contours\", 0, 200, nothing)\n cv2.createTrackbar(\"area\", \"Contours\", 0, 2000, nothing)\n\n # find all possible contours in the image\n ret, thresh = cv2.threshold(edges, 127, 255, 0)\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n while 1:\n cv2.imshow(\"Contours\", colorimg)\n\n # filter based on area and length\n lengthlimit = cv2.getTrackbarPos(\"length\", \"Contours\")\n arealimit = cv2.getTrackbarPos(\"area\", \"Contours\")\n\n for idx, contour in enumerate(contours):\n if len(contour) > lengthlimit:\n area = cv2.contourArea(contour)\n if area > arealimit:\n isconvex = 
cv2.isContourConvex(contour)\n circle = cv2.minEnclosingCircle(contour)\n cv2.drawContours(colorimg, contours, idx, (0, 255, 0), 2)\n else:\n cv2.drawContours(colorimg, contours, idx, (0, 0, 255), 2)\n else:\n cv2.drawContours(colorimg, contours, idx, (0, 0, 255), 2)\n\n k = cv2.waitKey(1) & 0xFF\n # print k\n if k == 27:\n exit()\n if k == 32:\n show_final(colorimg, contours, arealimit, lengthlimit)", "def find_img2d_candidates(image, **kwargs):\n\n # filter_kernel = np.array([[-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225]])\n\n filter_kernel = np.array([[-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 
11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324]])\n\n res = sg.convolve2d(image, filter_kernel, mode='same', boundary='fill', fillvalue=0)\n coord_x, coord_y = find_max_coords(np.absolute(res))\n\n return coord_x, coord_y", "def membrane_detect(img_grayscale):\n \"\"\"input: grayscale image\"\"\"\n \"\"\"output: binary image\"\"\"\n\n #sharpened image:\n im_sharp= unsharp_mask(img_grayscale, radius=2, amount=2)\n\n # Equalization threshold:\n p2, p98 = np.percentile(im_sharp, (2, 98))\n im_eq = exposure.rescale_intensity(img_grayscale, in_range=(p2, p98))\n\n #Gaussian:\n im_gaus=gaussian_filter(im_eq, sigma=2.7)\n\n #Edge detection: \n im_edge=feature.canny(im_gaus, sigma=1)\n\n #remove small objects:\n im_clean1 = morphology.remove_small_objects(im_edge, 200, in_place=True, connectivity=50)\n\n #close:\n phantom = im_clean1\n phantom[10:30, 200:210] = 0\n selem_c = disk(10)\n im_closed = closing(phantom, selem_c)\n\n #dialated:\n selem_d = disk(4)\n im_dialated=dilation(im_closed, selem_d)\n\n\n #remove small objects:\n im_final = morphology.remove_small_objects(im_dialated, 1700, in_place=True, connectivity=200)\n\n return im_final", "def find_cars(img, scale):\n img_boxes = [] # Clears img_boxes so we don't keep unwanted heatmap history\n count = 0\n draw_img = np.copy(img)\n\n # Make a heatmap of zeros\n heatmap = np.zeros_like(img[:, :, 0])\n\n # IMPORTANT : reading *.jpeg's (scaled 0-255, aka scaling needed), but\n # # trained on *.png's (scaled 0-1, aka scaling not needed)\n if img.dtype == 'uint8':\n img = img.astype(np.float32) / 255 # aka 
scaling needed\n\n img_tosearch = img[ystart:ystop, :, :]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n\n if scale != 1: # resize whole image instead of separate windows\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))\n\n ch1 = ctrans_tosearch[:, :, 0]\n ch2 = ctrans_tosearch[:, :, 1]\n ch3 = ctrans_tosearch[:, :, 2]\n\n # Define blocks and steps as above\n # These hold the number of HOG cells\n nxblocks = (ch1.shape[1] // pix_per_cell) - 1 # Note : '//' causes integers to be result, instead of floats\n nyblocks = (ch1.shape[0] // pix_per_cell) - 1\n # How many features per block are we going to be extracting\n nfeat_per_block = orient * cell_per_block ** 2\n window = 64\n nblocks_per_window = (window // pix_per_cell) - 1\n # aka 75% overlap between cells\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n count += 1\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get colour features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(\n np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))\n test_prediction = svc.predict((test_features))\n\n if test_prediction == 1:\n xbox_left = np.int(xleft * scale)\n ytop_draw = np.int(ytop * scale)\n win_draw = np.int(window * scale)\n cv2.rectangle(draw_img, (xbox_left, ytop_draw + ystart),\n (xbox_left + win_draw, ytop_draw + win_draw + ystart), (0, 0, 255))\n img_boxes.append(\n ((xbox_left, ytop_draw + ystart), (xbox_left + win_draw, ytop_draw + win_draw + ystart)))\n heatmap[ytop_draw + ystart:ytop_draw + win_draw + ystart, xbox_left:xbox_left + win_draw] += 1\n\n return draw_img, img_boxes, heatmap", "def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = 
aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()", "def __init__(self):\n\n self.__hsv_threshold_hue = [25.899280575539567, 29.11573068569659]\n self.__hsv_threshold_saturation = [185.74640287769785, 255.0]\n self.__hsv_threshold_value = [137.80585700930783, 255.0]\n\n self.hsv_threshold_output = None\n\n self.__find_contours_input = self.hsv_threshold_output\n self.__find_contours_external_only = False\n\n self.find_contours_output = None\n\n self.__filter_contours_contours = self.find_contours_output\n self.__filter_contours_min_area = 30.0\n self.__filter_contours_min_perimeter = 30.0\n self.__filter_contours_min_width = 0.0\n self.__filter_contours_max_width = 1000.0\n self.__filter_contours_min_height = 0.0\n self.__filter_contours_max_height = 1000.0\n self.__filter_contours_solidity = [0, 100]\n self.__filter_contours_max_vertices = 1000000.0\n self.__filter_contours_min_vertices = 0.0\n self.__filter_contours_min_ratio = 0.0\n self.__filter_contours_max_ratio = 1000.0\n\n self.filter_contours_output = None" ]
[ "0.6465045", "0.63361573", "0.618783", "0.61272085", "0.6077512", "0.60599184", "0.60292345", "0.6024568", "0.601719", "0.5986781", "0.59664744", "0.5910309", "0.58662456", "0.58420956", "0.5835442", "0.58258045", "0.5802556", "0.5799526", "0.5788895", "0.5763871", "0.57510483", "0.57481074", "0.5744353", "0.5741554", "0.5721295", "0.5696525", "0.56769806", "0.56660163", "0.56635094", "0.5659737" ]
0.766101
0
CounterPointer a model defined in Swagger
def __init__(self, counter_type: str=None, counter: int=None): # noqa: E501 self.swagger_types = { 'counter_type': str, 'counter': int } self.attribute_map = { 'counter_type': 'counterType', 'counter': 'counter' } self._counter_type = counter_type self._counter = counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_counter(self, counter, entity):", "def api_callcounter():\n try:\n return jsonify({'callcounter': get_model().call_counter})\n except Exception as e:\n response = jsonify({'error': 'API error'})\n response.status_code = 400\n return response", "def __init__(self, counter):\n self.counter: Counter = counter", "def __init__(self):\n self.id = None\n self.typeInfo['id'] = 'string'\n \"\"\"Name of the counter.\"\"\"\n self.name = None\n self.typeInfo['name'] = 'string'\n \"\"\"Source of the counter.\"\"\"\n self.source = None\n self.typeInfo['source'] = 'string'\n \"\"\"Value in case of snmp or other specific counters.\"\"\"\n self.value = None\n self.typeInfo['value'] = 'string'\n \"\"\"zone id of counter\"\"\"\n self.zoneid = None\n self.typeInfo['zoneid'] = 'string'", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_produto': 'int',\n 'id_pessoa': 'int',\n 'id_parentesco': 'int',\n 'tipo_portador': 'str',\n 'nome_impresso': 'str',\n 'id_tipo_cartao': 'int',\n 'flag_ativo': 'int',\n 'data_cadastro_portador': 'str',\n 'data_cancelamento_portador': 'str'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_produto': 'idProduto',\n 'id_pessoa': 'idPessoa',\n 'id_parentesco': 'idParentesco',\n 'tipo_portador': 'tipoPortador',\n 'nome_impresso': 'nomeImpresso',\n 'id_tipo_cartao': 'idTipoCartao',\n 'flag_ativo': 'flagAtivo',\n 'data_cadastro_portador': 'dataCadastroPortador',\n 'data_cancelamento_portador': 'dataCancelamentoPortador'\n }\n\n self._id_conta = None\n self._id_produto = None\n self._id_pessoa = None\n self._id_parentesco = None\n self._tipo_portador = None\n self._nome_impresso = None\n self._id_tipo_cartao = None\n self._flag_ativo = None\n self._data_cadastro_portador = None\n self._data_cancelamento_portador = None", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def __init__(self): # noqa: E501\n self.openapi_types = {\n }\n\n self.attribute_map = {\n }", "def __init__(self, label=None, status_counts=None):\n self.swagger_types = {\n 'label': str,\n 'status_counts': StatusCounts\n }\n\n self.attribute_map = {\n 'label': 'label',\n 'status_counts': 'statusCounts'\n }\n\n self._label = label\n self._status_counts = status_counts", "def _increment(cls, counter_name: str, counter_category: str = None) -> int:\n counter_key = {\n \"_id\": counter_category if counter_category else cls.__collection__.name\n }\n counter_update = {\n \"$inc\": {f\"{counter_name}.counter\": 1},\n \"$set\": {f\"{counter_name}.last_update_time\": datetime.datetime.utcnow()},\n }\n counter_element = cls.__counters__.find_one_and_update(\n counter_key,\n counter_update,\n return_document=pymongo.ReturnDocument.AFTER,\n upsert=True,\n )\n return counter_element[counter_name][\"counter\"]", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def increment_counter(self) -> None:", "def __init__(self, client):\n self.client = client\n self.definitions = client.swagger_spec.definitions", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_pessoa': 'int',\n 'id_cartao': 'int',\n 'id_bandeira': 'int',\n 'id_tipo_cartao': 'int',\n 'numero_cartao': 'str',\n 'nome_plastico': 'str',\n 'cvv2': 'str',\n 'data_geracao': 'str',\n 'data_validade': 'str',\n 'cpf': 'str',\n 'tipo_portador': 'str',\n 'trilha1': 'str',\n 'trilha2': 'str',\n 'trilha_cvv1': 'str',\n 'trilha_cvv2': 'str',\n 'flag_virtual': 'int',\n 'nome_bandeira': 
'str',\n 'flag_titular': 'int',\n 'sequencial_cartao': 'int',\n 'id_status': 'int',\n 'descricao_status_cartao': 'str',\n 'data_status': 'str',\n 'id_estagio': 'int',\n 'descricao_estagio': 'str',\n 'data_estagio': 'str',\n 'numero_bin': 'str',\n 'id_produto': 'int',\n 'descricao_produto': 'str',\n 'id_status_conta': 'int',\n 'descricao_status_conta': 'int',\n 'data_embossing': 'str',\n 'codigo_desbloqueio': 'str',\n 'nome_pessoa': 'str',\n 'tipo_pessoa': 'str',\n 'data_nascimento': 'str',\n 'id_endereco': 'int',\n 'id_tipo_endereco': 'int',\n 'descricao_tipo_endereco': 'str',\n 'cep': 'str',\n 'logradouro': 'str',\n 'numero_endereco': 'str',\n 'complemento_endereco': 'str',\n 'bairro': 'str',\n 'cidade': 'str',\n 'uf': 'str',\n 'pais': 'str',\n 'senha_criptografada': 'str',\n 'icvv': 'str',\n 'id_status_impressao': 'int'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_pessoa': 'idPessoa',\n 'id_cartao': 'idCartao',\n 'id_bandeira': 'idBandeira',\n 'id_tipo_cartao': 'idTipoCartao',\n 'numero_cartao': 'numeroCartao',\n 'nome_plastico': 'nomePlastico',\n 'cvv2': 'cvv2',\n 'data_geracao': 'dataGeracao',\n 'data_validade': 'dataValidade',\n 'cpf': 'cpf',\n 'tipo_portador': 'tipoPortador',\n 'trilha1': 'trilha1',\n 'trilha2': 'trilha2',\n 'trilha_cvv1': 'trilhaCVV1',\n 'trilha_cvv2': 'trilhaCVV2',\n 'flag_virtual': 'flagVirtual',\n 'nome_bandeira': 'nomeBandeira',\n 'flag_titular': 'flagTitular',\n 'sequencial_cartao': 'sequencialCartao',\n 'id_status': 'idStatus',\n 'descricao_status_cartao': 'descricaoStatusCartao',\n 'data_status': 'dataStatus',\n 'id_estagio': 'idEstagio',\n 'descricao_estagio': 'descricaoEstagio',\n 'data_estagio': 'dataEstagio',\n 'numero_bin': 'numeroBin',\n 'id_produto': 'idProduto',\n 'descricao_produto': 'descricaoProduto',\n 'id_status_conta': 'idStatusConta',\n 'descricao_status_conta': 'descricaoStatusConta',\n 'data_embossing': 'dataEmbossing',\n 'codigo_desbloqueio': 'codigoDesbloqueio',\n 'nome_pessoa': 'nomePessoa',\n 'tipo_pessoa': 'tipoPessoa',\n 'data_nascimento': 'dataNascimento',\n 'id_endereco': 'idEndereco',\n 'id_tipo_endereco': 'idTipoEndereco',\n 'descricao_tipo_endereco': 'descricaoTipoEndereco',\n 'cep': 'cep',\n 'logradouro': 'logradouro',\n 'numero_endereco': 'numeroEndereco',\n 'complemento_endereco': 'complementoEndereco',\n 'bairro': 'bairro',\n 'cidade': 'cidade',\n 'uf': 'uf',\n 'pais': 'pais',\n 'senha_criptografada': 'senhaCriptografada',\n 'icvv': 'icvv',\n 'id_status_impressao': 'idStatusImpressao'\n }\n\n self._id_conta = None\n self._id_pessoa = None\n self._id_cartao = None\n self._id_bandeira = None\n self._id_tipo_cartao = None\n self._numero_cartao = None\n self._nome_plastico = None\n self._cvv2 = None\n self._data_geracao = None\n self._data_validade = None\n self._cpf = None\n self._tipo_portador = None\n self._trilha1 = None\n self._trilha2 = None\n self._trilha_cvv1 = None\n self._trilha_cvv2 = None\n self._flag_virtual = None\n self._nome_bandeira = None\n self._flag_titular = None\n self._sequencial_cartao = None\n self._id_status = None\n self._descricao_status_cartao = None\n self._data_status = None\n self._id_estagio = None\n self._descricao_estagio = None\n self._data_estagio = None\n self._numero_bin = None\n self._id_produto = None\n self._descricao_produto = None\n self._id_status_conta = None\n self._descricao_status_conta = None\n self._data_embossing = None\n self._codigo_desbloqueio = None\n self._nome_pessoa = None\n self._tipo_pessoa = None\n self._data_nascimento = None\n self._id_endereco = 
None\n self._id_tipo_endereco = None\n self._descricao_tipo_endereco = None\n self._cep = None\n self._logradouro = None\n self._numero_endereco = None\n self._complemento_endereco = None\n self._bairro = None\n self._cidade = None\n self._uf = None\n self._pais = None\n self._senha_criptografada = None\n self._icvv = None\n self._id_status_impressao = None", "def _increase_counter(self, response):\n response_id = response.meta['__id']\n spot = self._request_registry[response_id]\n spot['counter'] = spot.get('counter', 0) + 1", "def inspect_model_fields(self, model: ModelRepresentation) -> None:\n c = model.count()\n title(f\"{model.name} ({c})\")\n print(model.fields_info())", "def test_creation_doc():\n value = 42\n doc = \"I am an int\"\n\n num_a = param.Integer(value=value, doc=doc)\n assert num_a.value == value\n assert num_a.doc == doc", "def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id = None\n self._v = None\n self._id = None\n self._case_records = None", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'version': 'int',\n 'division': 'DomainEntityRef',\n 'campaign_status': 'str',\n 'callable_time_set': 'DomainEntityRef',\n 'contact_list': 'DomainEntityRef',\n 'dnc_lists': 'list[DomainEntityRef]',\n 'always_running': 'bool',\n 'contact_sorts': 'list[ContactSort]',\n 'messages_per_minute': 'int',\n 'errors': 'list[RestErrorDetail]',\n 'sms_config': 'SmsConfig',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'date_created': 'dateCreated',\n 'date_modified': 'dateModified',\n 'version': 'version',\n 'division': 'division',\n 'campaign_status': 'campaignStatus',\n 'callable_time_set': 'callableTimeSet',\n 'contact_list': 'contactList',\n 'dnc_lists': 'dncLists',\n 'always_running': 'alwaysRunning',\n 'contact_sorts': 'contactSorts',\n 'messages_per_minute': 'messagesPerMinute',\n 'errors': 'errors',\n 'sms_config': 'smsConfig',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._date_created = None\n self._date_modified = None\n self._version = None\n self._division = None\n self._campaign_status = None\n self._callable_time_set = None\n self._contact_list = None\n self._dnc_lists = None\n self._always_running = None\n self._contact_sorts = None\n self._messages_per_minute = None\n self._errors = None\n self._sms_config = None\n self._self_uri = None", "def _increment_viewcount(model, model_id: int, request):\n object_key = model.__name__ + ':' + str(model_id)\n\n redis = get_redis_connection('traffic_stats')\n view_count = redis.get(object_key)\n if not view_count:\n # Cache miss. 
Get the view count from the database and cache it.\n try:\n view_count = int(model.objects.get(identifier=model_id).view_count)\n except ObjectDoesNotExist:\n # If the object doesn't even exist in the database, don't track it.\n return\n except FieldDoesNotExist:\n log.error(\n 'Cannot track model {} because it has no view_count field. '\n 'Views for this model will be lost.'.format(model.__name__)\n )\n return -1\n redis.set(object_key, view_count)\n else:\n view_count = int(view_count)\n\n # Only increment the view count if the user has not visited the resource in\n # the last few minutes. Prevents metrics gaming shenanigans.\n ip = _get_user_ip(request)\n if not _is_recent_visitor(ip, object_key):\n redis.incr(object_key)\n view_count += 1\n _mark_recent_visitor(ip, object_key)\n\n # Update the last access time of the model.\n # Store in a sorted set so we can easily find the oldest keys.\n timestamp = time.time()\n redis.execute_command(\n 'ZADD model-last-accessed {} {}'.format(timestamp, object_key)\n )\n return view_count", "def counter(self) -> int:", "def counter(self) -> int:", "def __init__(self):\n self.swagger_types = {\n 'app_id': 'int',\n 'app_sw_rev': 'str',\n 'avg_hops': 'float',\n 'avg_latency': 'int',\n 'charge': 'int',\n 'estimated_latency_to_mote': 'int',\n 'hw_model': 'int',\n 'hw_rev': 'int',\n 'id': 'int',\n 'join_sys_time': 'datetime',\n 'last_voltage': 'int',\n 'lost_packet_count': 'int',\n 'mac_address': 'str',\n 'max_current': 'int',\n 'max_num_links': 'int',\n 'max_num_neighbors': 'int',\n 'need_neighbor': 'bool',\n 'num_good_neighbors': 'int',\n 'num_joins': 'int',\n 'num_links': 'int',\n 'num_neighbors': 'int',\n 'num_parents': 'int',\n 'power_cost_rx_link': 'int',\n 'power_cost_tx_link': 'int',\n 'reliability': 'float',\n 'rx_packet_count': 'int',\n 'stack_sw_rev': 'str',\n 'state': 'str',\n 'state_reason': 'str',\n 'state_sys_time': 'datetime',\n 'used_current': 'int'\n }\n\n self.attribute_map = {\n 'app_id': 'appId',\n 'app_sw_rev': 'appSwRev',\n 'avg_hops': 'avgHops',\n 'avg_latency': 'avgLatency',\n 'charge': 'charge',\n 'estimated_latency_to_mote': 'estimatedLatencyToMote',\n 'hw_model': 'hwModel',\n 'hw_rev': 'hwRev',\n 'id': 'id',\n 'join_sys_time': 'joinSysTime',\n 'last_voltage': 'lastVoltage',\n 'lost_packet_count': 'lostPacketCount',\n 'mac_address': 'macAddress',\n 'max_current': 'maxCurrent',\n 'max_num_links': 'maxNumLinks',\n 'max_num_neighbors': 'maxNumNeighbors',\n 'need_neighbor': 'needNeighbor',\n 'num_good_neighbors': 'numGoodNeighbors',\n 'num_joins': 'numJoins',\n 'num_links': 'numLinks',\n 'num_neighbors': 'numNeighbors',\n 'num_parents': 'numParents',\n 'power_cost_rx_link': 'powerCostRxLink',\n 'power_cost_tx_link': 'powerCostTxLink',\n 'reliability': 'reliability',\n 'rx_packet_count': 'rxPacketCount',\n 'stack_sw_rev': 'stackSwRev',\n 'state': 'state',\n 'state_reason': 'stateReason',\n 'state_sys_time': 'stateSysTime',\n 'used_current': 'usedCurrent'\n }\n\n self._app_id = None\n self._app_sw_rev = None\n self._avg_hops = None\n self._avg_latency = None\n self._charge = None\n self._estimated_latency_to_mote = None\n self._hw_model = None\n self._hw_rev = None\n self._id = None\n self._join_sys_time = None\n self._last_voltage = None\n self._lost_packet_count = None\n self._mac_address = None\n self._max_current = None\n self._max_num_links = None\n self._max_num_neighbors = None\n self._need_neighbor = None\n self._num_good_neighbors = None\n self._num_joins = None\n self._num_links = None\n self._num_neighbors = None\n 
self._num_parents = None\n self._power_cost_rx_link = None\n self._power_cost_tx_link = None\n self._reliability = None\n self._rx_packet_count = None\n self._stack_sw_rev = None\n self._state = None\n self._state_reason = None\n self._state_sys_time = None\n self._used_current = None", "def d_model(request) -> int:\n return request.param", "def from_dict(cls, dikt) -> 'CounterPointer':\n return util.deserialize_model(dikt, cls)", "def serialize(self):\n return {\n \"id\": self.id,\n \"counter\": self.get()\n }", "def performance(self, id):" ]
[ "0.58467513", "0.5743902", "0.54858905", "0.5383966", "0.5349426", "0.5337461", "0.5337461", "0.5337461", "0.5337461", "0.5337461", "0.5311203", "0.5290875", "0.51737857", "0.5157004", "0.51550364", "0.51355237", "0.5131821", "0.51205987", "0.51058364", "0.50097054", "0.4963995", "0.49609765", "0.49552426", "0.4938188", "0.4938188", "0.49333853", "0.4928898", "0.4917317", "0.49075115", "0.48848918" ]
0.5896417
0
Gets the counter_type of this CounterPointer.
def counter_type(self) -> str: return self._counter_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_perfcount_type(self):\n return self._perfcount_type", "def get_type(self):\n return self._TYPE", "def getType(self):\n return self._type", "def get_type(self):\n return self._type", "def get_type(self):\n return self._type", "def type(self) -> MetricType:\n return self._type", "def GetType(self):\r\n\r\n return self._type", "def getType(self):\n return self.type_", "def get_type(self):\n return self._type_obj", "def getType(self):\n return self.type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type" ]
[ "0.68725675", "0.6516205", "0.64693546", "0.6462432", "0.6462432", "0.64550483", "0.6409898", "0.6399005", "0.6373705", "0.636375", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509", "0.6335509" ]
0.78557086
0
Sets the counter_type of this CounterPointer.
def counter_type(self, counter_type: str): allowed_values = ["character", "byte", "page"] # noqa: E501 if counter_type not in allowed_values: raise ValueError( "Invalid value for `counter_type` ({0}), must be one of {1}" .format(counter_type, allowed_values) ) self._counter_type = counter_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetType(self, ct_type):\r\n\r\n self._type = ct_type", "def set_type(self, type):\n self._type = type", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def __init__(self, counter_type: str=None, counter: int=None): # noqa: E501\n self.swagger_types = {\n 'counter_type': str,\n 'counter': int\n }\n\n self.attribute_map = {\n 'counter_type': 'counterType',\n 'counter': 'counter'\n }\n self._counter_type = counter_type\n self._counter = counter", "def type(self, type):\n self._type = type", "def type(self, type):\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type" ]
[ "0.6586995", "0.64445937", "0.6394954", "0.6394954", "0.6363235", "0.62764454", "0.62764454", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395", "0.62655395" ]
0.77912813
0
Gets the counter of this CounterPointer.
def counter(self) -> int: return self._counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCounter(self):\n return self.i", "def current(self):\n return self.counter.count", "def get_counter(self):\n counter = self.driver.find_element_by_name(self.COUNTER_NAME)\n return int(counter.text)", "def _get_counter(cls, counter_name: str, counter_category: str = None) -> int:\n counter_key = {\n \"_id\": counter_category if counter_category else cls.__collection__.name\n }\n counter_element = cls.__counters__.find_one(counter_key)\n return counter_element[counter_name][\"counter\"] if counter_element else 0", "def _get_counters(self):\n return self.__counters", "def _get_counters(self):\n return self.__counters", "def _get_counters(self):\n return self.__counters", "def _get_counters(self):\n return self.__counters", "def counter_from_time(self):\n return self.__counter_from_time", "def getAndUpdateCounter(self):\n red = self.dbConnect()\n curr_counter=0\n if 'counter_value' in red:\n curr_counter = int(red.get('counter_value').decode('UTF-8'))\n print(\"incrementing counter...\")\n print(\"older value: \" + str(curr_counter))\n red.set('counter_value', curr_counter + 1)\n else:\n # just an arbitrary value\n red.set('counter_value', 14433)\n return curr_counter", "def current_value(self):\n return self.current_counter.value", "def get_counter():\n try:\n # Setup counter in database\n counter = Counter.query.one_or_none()\n if counter is None:\n counter = Counter()\n db.session.add(counter)\n db.session.commit()\n return counter\n except Exception as err:\n app.logger.error(err)", "def value(self,counterName):\n if self.counters.has_key(counterName): result=self.counters[counterName]\n else: result=0\n return result", "def get():\n global __internal_state_index_counter\n __internal_state_index_counter += long(1)\n return __internal_state_index_counter", "def counter(self) -> int:", "def counter(self) -> int:", "def refcount(self):\n return self._refcount", "def getID(self):\n global COUNT, C_LOCK\n with C_LOCK:\n COUNT += 1\n return COUNT", "def count(self):\n return clone_counter._count", "def length(self):\n return self.counter", "def incr_counter(cls, cname):\n if not cname in cls.__counters: cls.__counters[cname] = -1\n cls.__counters[cname] += 1\n return cls.__counters[cname]", "def _get_counter(metric: str) -> int:\n if metric not in db:\n db[metric] = 0\n return db[metric]", "def get_current(self) -> int:\n return self._current", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "def segment_counter(self):\n return self._data_writer.get_segment_counter()", "def counter_callback(from_index):\n from_node = manager.IndexToNode(from_index)\n return data['counter'][from_node]", "def read_counter(self, path):\n self.cursor.execute('SELECT * FROM \"counter\" WHERE \"fullpath\"=?', (path,))\n row = self.cursor.fetchone()\n count = 0\n if row != None : count = row[1]\n # print 'read_counter:', path, count\n return count", "def get_program_counter(self):\n return self.get_thread().program_counter", "def get_count(self):\n return self._count", "def get_counter(self, name: str, attributes: Attributes = None):\n key = _generate_key_name(name, attributes)\n if key not in self.map:\n self.map[key] = self._create_counter(name)\n return self.map[key]" ]
[ "0.7747119", "0.69934356", "0.6839439", "0.670072", "0.66731805", "0.66731805", "0.66731805", "0.66731805", "0.6605191", "0.64229405", "0.6273553", "0.62684166", "0.6229281", "0.6121677", "0.61209786", "0.61209786", "0.6081115", "0.6050365", "0.6046811", "0.5928756", "0.58936256", "0.5850139", "0.58231753", "0.5803907", "0.57996297", "0.578396", "0.5761344", "0.57496506", "0.5715965", "0.56662065" ]
0.7122936
1
Sets the counter of this CounterPointer.
def counter(self, counter: int): if counter is None: raise ValueError("Invalid value for `counter`, must not be `None`") # noqa: E501 self._counter = counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setCount(self, num):\n self.count=num", "def set_sequence(self, counter):\n self.seq_counter = counter", "def inc_counter(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def set_count(self, count):\n self._count = count", "def set_counter_increase(self, val=1):\r\n return self._arm.set_counter_increase(val)", "def set_count(c):\n global count\n count = c", "def increment_counter(self) -> None:", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def set(self, val: int) -> None:\n self.val = val\n self.notes = []", "def increase_counter(self):\n self.values = self.values + 1", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def reset_counter(self) -> None:", "def write(self, value: int):\n self.data[self.pointer] = value", "def update_counter(self, counter, entity):", "def emit_counter(self, category, name, pid, timestamp, counter, value):\n event = self._create_event('C', category, name, pid, 0, timestamp)\n event['args'] = {counter: value}\n self._events.append(event)", "def segment_counter(self, _):\n raise NotImplementedError(\n \"We do not support externally altering the segment counter\")", "def direct_count(self, direct_count):\n\n self._direct_count = direct_count", "def api_call_counter(self, api_call_counter):\n\n self._api_call_counter = api_call_counter", "def increment(self):\n self.data[self.pointer] += 1\n self.data[self.pointer] %= 256", "def set_pc(self, value):\n\n self.program_counter.set_value(str(value))", "def increase_count(self, number=1):\n self.count += number", "def inc( self ):\n self.count += 1", "def set_current(self, val: int) -> None:\n self._bin_iter.set_current(val)", "def i(self, i):\n\n self._i = i", "def inc(self):\n self._value += 1", "def reset_counter(self):\n self.counter = 0\n self.highlight_input()", "def setMancount(self, cnt):\n self.__mancount=cnt", "def inc(self):\n \n self.count += 1", "def count(self, value):\n \n self._count = int(value)", "def count(self, count):\n\n self._count = count" ]
[ "0.6262257", "0.61544836", "0.6107164", "0.60725594", "0.60548764", "0.5941396", "0.59272134", "0.5889299", "0.5781394", "0.57757884", "0.5772924", "0.5759819", "0.5737988", "0.57238454", "0.56709605", "0.5640289", "0.559615", "0.5585219", "0.5573069", "0.554153", "0.55058885", "0.5492513", "0.5475336", "0.5463029", "0.54581505", "0.545138", "0.5442515", "0.5441484", "0.5433794", "0.54036605" ]
0.6550495
0
Context manager that sets the MAPPINGS_USE_NESTED setting with the given value, default True
def mappings_use_nested(value=True): old_setting = Settings.MAPPINGS_USE_NESTED try: Settings.MAPPINGS_USE_NESTED = value yield finally: Settings.MAPPINGS_USE_NESTED = old_setting
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isNest(self):\n\t\tif self.nestInfo == None:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def test_documentation_popxl_nested_session_contexts(self):\n filename = \"nested_session_contexts.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def default_nested(self, data, many, **kwargs):\n if not data.get(\"metadata\"):\n data[\"metadata\"] = {}\n if not data.get(\"pids\"):\n data[\"pids\"] = {}\n\n return data", "def test_nested_contextmanager(self):\n code = \"\"\"\n from contextlib import contextmanager\n\n @contextmanager\n def outer():\n @contextmanager\n def inner():\n yield 2\n yield inner\n\n with outer() as ctx:\n ctx #@\n with ctx() as val:\n val #@\n \"\"\"\n context_node, value_node = extract_node(code)\n value = next(value_node.infer())\n context = next(context_node.infer())\n assert isinstance(context, nodes.FunctionDef)\n assert isinstance(value, nodes.Const)", "def test_type_mapping_nested_with_disabled_parameter(registry):\n with mappings_use_nested(True):\n mapping = type_mapping(registry[TYPES], 'TestingNestedEnabled')\n assert mapping\n assert 'properties' in mapping\n assert mapping['properties']['object_options'].get('type', 'object') != 'nested' # neither enabled\n assert mapping['properties']['disabled_array_of_objects_in_calc_prop'].get('type', 'object') != 'nested'\n assert mapping['properties']['enabled_array_of_objects_in_calc_prop']['type'] == 'nested' # enabled\n assert mapping['properties']['nested_options']['type'] == 'nested' # enabled", "def nesting_depth(self):\n self._nesting_depth += 1\n if self._nesting_depth > MAX_VARIANT_NESTING_DEPTH:\n raise MessageError('nesting depth > %d' % MAX_VARIANT_NESTING_DEPTH)\n try:\n yield\n finally:\n self._nesting_depth -= 1", "def begin_nested(self) -> SessionTransaction:\n return self.begin(nested=True)", "def validate_setup_for_nested_quota_use(ctxt, resources,\n nested_quota_driver,\n fix_allocated_quotas=False):\n try:\n project_roots = get_all_root_project_ids(ctxt)\n\n # Now that we've got the roots of each tree, validate the trees\n # to ensure that each is setup logically for nested quotas\n for root in project_roots:\n root_proj = get_project_hierarchy(ctxt, root,\n subtree_as_ids=True)\n nested_quota_driver.validate_nested_setup(\n ctxt,\n resources,\n {root_proj.id: root_proj.subtree},\n fix_allocated_quotas=fix_allocated_quotas\n )\n except exceptions.VersionNotAvailable:\n msg = _(\"Keystone version 3 or greater must be used to get nested \"\n \"quota support.\")\n raise exception.CinderException(message=msg)\n except exceptions.Forbidden:\n msg = _(\"Must run this command as cloud admin using \"\n \"a Keystone policy.json which allows cloud \"\n \"admin to list and get any project.\")\n raise exception.CinderException(message=msg)", "def nested_dict():\n return defaultdict(nested_dict)", "def nested(*contexts):\n with ExitStack() as stack:\n for ctx in contexts:\n stack.enter_context(ctx())\n yield contexts", "def build_nested_blocks(self):\n pass", "def as_context(self, walker, set_parent_map=True):\n return _VisitorMapContextManager(self, walker, set_parent_map)", "def annotate_depth(self, limit=None):\n queryset = self\n stack = []\n for p in queryset:\n try:\n prev_p = stack[-1]\n except IndexError:\n prev_p = None\n if prev_p is not None:\n while (p.prefix not in prev_p.prefix) or p.prefix == prev_p.prefix:\n stack.pop()\n try:\n prev_p = stack[-1]\n except IndexError:\n prev_p = None\n break\n if prev_p is not None:\n prev_p.has_children = True\n 
stack.append(p)\n p.depth = len(stack) - 1\n if limit is None:\n return queryset\n return filter(lambda p: p.depth <= limit, queryset)", "def is_multi_level(self):\n return self.is_flag_set(StatefulParser.FLAGS.MULTI_LEVEL)", "def go_deeper(cls, *args, **kwargs):\n\t\treturn True", "def nest(self, name):\n step_result = self(name, [])\n with self.m.context(name_prefix=name, increment_nest_level=True):\n yield step_result", "def test_nested():\n # pylint: disable=no-member\n assert issubclass(NestedSchema, graphene.ObjectType)\n assert isinstance(NestedSchema.name, graphene.String)\n assert isinstance(NestedSchema.leaf, graphene.Field)\n assert str(NestedSchema.leaf.type) == \"Leaf\"\n assert isinstance(NestedSchema.leaf.type.value, graphene.String)\n assert isinstance(NestedSchema.leaf.type.leaflets, graphene.List)", "def _extra_context(self):\r\n return {}", "def _log_nested_outer(self):\n def _log_nested_inner():\n logging.info('info nested')\n return _log_nested_inner", "def test_nested(self):\n self.insert_row()\n\n instance = Band.objects(Band.manager).first().run_sync()\n dictionary = instance.to_dict()\n if engine_is(\"cockroach\"):\n self.assertDictEqual(\n dictionary,\n {\n \"id\": dictionary[\"id\"],\n \"name\": \"Pythonistas\",\n \"manager\": {\n \"id\": instance[\"manager\"][\"id\"],\n \"name\": \"Guido\",\n },\n \"popularity\": 1000,\n },\n )\n else:\n self.assertDictEqual(\n dictionary,\n {\n \"id\": 1,\n \"name\": \"Pythonistas\",\n \"manager\": {\"id\": 1, \"name\": \"Guido\"},\n \"popularity\": 1000,\n },\n )", "def _maybe_wrap_cms_in_parens(\n node: Node, mode: Mode, features: Collection[Feature]\n) -> None:\n if (\n Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features\n or Preview.wrap_multiple_context_managers_in_parens not in mode\n or len(node.children) <= 2\n # If it's an atom, it's already wrapped in parens.\n or node.children[1].type == syms.atom\n ):\n return\n colon_index: Optional[int] = None\n for i in range(2, len(node.children)):\n if node.children[i].type == token.COLON:\n colon_index = i\n break\n if colon_index is not None:\n lpar = Leaf(token.LPAR, \"\")\n rpar = Leaf(token.RPAR, \"\")\n context_managers = node.children[1:colon_index]\n for child in context_managers:\n child.remove()\n # After wrapping, the with_stmt will look like this:\n # with_stmt\n # NAME 'with'\n # atom\n # LPAR ''\n # testlist_gexp\n # ... 
<-- context_managers\n # /testlist_gexp\n # RPAR ''\n # /atom\n # COLON ':'\n new_child = Node(\n syms.atom, [lpar, Node(syms.testlist_gexp, context_managers), rpar]\n )\n node.insert_child(1, new_child)", "def test_get_activities_from_recursive_contexts(self):\n from .mockers import context_query\n from .mockers import create_context\n from .mockers import subscribe_contextA, create_contextA, user_status_contextA\n from .mockers import subscribe_contextB, create_contextB, user_status_contextB\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context, permissions=dict(read='public', write='restricted', subscribe='restricted', invite='restricted'))\n self.create_context(create_contextA, permissions=dict(read='subscribed', write='subscribed', subscribe='restricted', invite='restricted'))\n self.create_context(create_contextB, permissions=dict(read='subscribed', write='subscribed', subscribe='restricted', invite='restricted'))\n self.admin_subscribe_user_to_context(username, subscribe_contextA)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_contextA)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_contextB)\n self.create_activity(username, user_status_contextA)\n self.create_activity(username_not_me, user_status_contextA)\n self.create_activity(username_not_me, user_status_contextB)\n\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username), status=200)\n result = json.loads(res.text)\n self.assertEqual(len(result), 2)\n self.assertEqual(result[0].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[0].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[0].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n self.assertEqual(result[1].get('actor', None).get('username'), 'messi')\n self.assertEqual(result[1].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[1].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username_not_me), status=200)\n result = json.loads(res.text)\n self.assertEqual(len(result), 3)\n self.assertEqual(result[0].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[0].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[0].get('contexts', None)[0]['url'], subscribe_contextB['object']['url'])\n self.assertEqual(result[1].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[1].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[1].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n self.assertEqual(result[2].get('actor', None).get('username'), 'messi')\n self.assertEqual(result[2].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[2].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])", "def patch_config(self_config, indict):\n for key in self_config:\n if isinstance(self_config[key], Section) \\\n and key in indict and isinstance(indict[key], Section):\n self_config[key].parent = self_config\n self_config[key].main = self_config.main\n self_config.comments[key] = indict.comments[key]\n self_config.inline_comments[key] = indict.inline_comments[key]\n patch_config(self_config[key], indict[key])", "def test_too_deeply_nested(self) -> None:\n 
nested_action = TestNestedMenuAction()\n nested2_action = TestNested2MenuAction()\n nested3_action = TooDeeplyNestedAction()\n\n actions_registry.register(self.test_menu_action)\n actions_registry.register(nested_action)\n actions_registry.register(nested2_action)\n\n with self.assertRaises(DepthLimitExceededError):\n actions_registry.register(nested3_action)", "def flag_inner_classes(obj):\n for tup in class_members(obj):\n tup[1]._parent = obj\n tup[1]._parent_inst = None\n tup[1].__getattr__ = my_getattr\n flag_inner_classes(tup[1])", "def test_modifier_with_nested_modifier():\n k = u\"A\"\n v = u\"zero >=1.0 sum(nested(constant 1.0 >2 zero), buck 10.0 0.1 32.0)\"\n \n parser = ConfigParser(io.StringIO())\n\n potential_forms = [\n PMT(u'nested', [PFI(u'constant', [1.0], MRD(u'>',0.0),\n PFI(u'zero', [], MRD(u'>', 2), None))], MRD(u'>', 0.0), None),\n PFI(u'buck', [10.0, 0.1, 32.0], MRD(u'>', 0.0), None)]\n\n expect = PFI(u'zero', [], MRD(u'>', 0.0),\n PMT(u'sum', potential_forms, MRD(u'>=', 1.0), None))\n\n expect = PairPotentialTuple(species = k, potential_form_instance = expect)\n actual = parser._parse_multi_range(k, v)\n assert DeepDiff(expect, actual) == {}", "def nested_dict_walker(fn, coll):\n return walk_values_rec(iffy(is_mapping, fn), coll)", "def _calculate_nested_inclusion(self, method: str, mode: str | None, model: Self) -> bool:\n # EMPTY RULE: If there is no value in the model it should NOT be INCLUDED.\n if not model:\n return False\n\n # METHOD RULE: When HTTP method is DELETE or GET nested objects should NOT be INCLUDED.\n if method in ['DELETE', 'GET']:\n return False\n\n # STAGED RULE: Any nested object provided by developer should be INCLUDED.\n if model._staged is True:\n return True\n\n # POST RULE: When method is POST all nested object should be INCLUDED.\n if method == 'POST':\n return True\n\n # Current Object Restrictions:\n # * The method is PUT\n # * The nested object was NOT added via the stage_xxx method\n # * The nested object contains a ID or Name Field\n # * The nested object could either have been set during initializing the object or fetched.\n\n # CM and TI endpoint behaves differently. Start with rules based on the parent type,\n # then add more specific rules.\n\n if self._cm_type is True:\n #\n # CM PARENT TYPES\n #\n\n # Nested Types:\n # * CM Types (Artifact, Artifact Type, Case, Note, Task, Workflow Event/Template)\n # * Attributes\n # * Group (currently read-only)\n # * Tags\n # * Users\n\n # Coverage:\n # * Downloaded from API (e.g., case.get(id=123))\n # * Added on instantiation\n # * Added with stage_xxx() method\n\n if model._cm_type is True:\n # RULE: Short-Circuit Nested CM Types\n # Nested CM types are updated through their direct endpoints and should\n # never be INCLUDED when updating the parent. For new nested CM types\n # added with the stage_xxx() method, the STAGED RULE would trigger\n # before this rule.\n return False\n\n if model._shared_type is True:\n # RULE: Nested Tags\n # Nested tags on a parent CM type behave as REPLACE mode and need to be\n # INCLUDED to prevent being removed.\n return True\n\n # RULE: Nested Attributes w/ APPEND mode\n # Nested attributes on a parent CM type use the mode feature. When the mode\n # is APPEND and has been UPDATED, then the attributes should be INCLUDED.\n # For new nested objects added with the stage_xxx() method, the STAGED\n # RULE would trigger first.\n # A secondary PATTERN consideration is that attributes can be immediately\n # updated using the attribute.updated() method. 
While this isn't as\n # efficient as updating them all in one request, it's is a simpler\n # development design pattern.\n\n if mode == 'replace':\n # RULE: Nested Attributes w/ REPLACE mode\n # Nested attributes on a parent CM type use the mode feature. When the mode\n # is REPLACE the attributes should be INCLUDED.\n return True\n\n # RULE: Nested Attributes w/ DELETE mode\n # Nested attributes on a parent CM type use the mode feature. When the mode\n # is DELETE the attribute should NOT be INCLUDED. Any attribute that was\n # added by the developer using the stage_xxx() method would have hit the\n # STAGED RULE above and would be INCLUDED.\n # A secondary PATTERN consideration is that attributes can be immediately\n # deleted using the attribute.delete() method. While this isn't as\n # efficient as deleting them all in one request, it's is a simpler\n # development design pattern.\n\n # All non-matching nested object that did not match a rule above will NOT be INCLUDED.\n return False\n\n #\n # TI PARENT TYPES (Groups, Indicators, Victim, and Victims Assets)\n #\n\n # Nested Types:\n # * Associations (Groups, Indicators, Victim Assets)\n # * Attributes\n # * Security Labels\n # * Tags\n\n # Coverage:\n # * Downloaded from API\n # * Added on instantiation\n # * Added with stage_xxx() method\n\n if mode == 'append' and self._associated_type:\n # RULE: Nested Object w/ APPEND mode\n # Nested object on a parent CM type use the mode feature. When the mode\n # is APPEND and not STAGED the object should NOT be INCLUDED.\n return True\n\n if mode == 'replace':\n # RULE: Nested Object w/ REPLACE mode\n # Nested object on a parent TI type use the mode feature. When the mode\n # is REPLACE the object should be INCLUDED.\n return True\n\n # * security_label -> delete (support id or name only)\n # * tag -> delete (support id or name only)\n if (\n mode == 'delete'\n and (model._shared_type is True or self._associated_type is True)\n and (model.id is not None or model.name is not None) # type: ignore\n ):\n # RULE: Nested Shared Object w/ DELETE mode (TAGS, SECURITY LABELS)\n # Nested shared object on a parent TI type use the mode feature. When the mode\n # is DELETE the shard object should not be INCLUDED. Any object that was\n # added by the developer would have hit the STAGED RULE above and would\n # be INCLUDED.\n return True\n\n # * associated -> delete (support id only)\n # * attribute -> delete (support id only)\n # RULE: Nested Object w/ DELETE mode\n # Nested object on a parent TI type use the mode feature. When the mode\n # is DELETE the object should not be INCLUDED. Any object that was\n # added by the developer would have hit the STAGED RULE above and would\n # be INCLUDED.\n\n # All non-matching nested object that did not match a rule above will NOT be INCLUDED.\n return False", "def _get_nested_dict(dictionary, key, nested_config=None):\n if key not in dictionary:\n nested = {}\n if nested_config:\n _fill_zero_counters_dict(nested_config, nested)\n dictionary[key] = nested\n return nested\n return dictionary[key]", "def flatten_dict(nested):\n flattened = {}\n for key, value in nested.items():\n if isinstance(value, Mapping):\n for subkey, subval in value.items():\n newkey = '.'.join([key, subkey])\n flattened[newkey] = subval\n flatten_dict(flattened)\n else:\n flattened[key] = value\n mappings = [isinstance(value, Mapping) for key, value in flattened.items()]\n if len(set(mappings)) == 1 and set(mappings).pop() is False:\n return flattened\n else:\n return flatten_dict(flattened)" ]
[ "0.51234853", "0.5110324", "0.51092035", "0.5021755", "0.49843925", "0.49771792", "0.4961515", "0.46479183", "0.46340314", "0.45778015", "0.4566229", "0.45511296", "0.4512281", "0.4510831", "0.44475397", "0.4445361", "0.44344524", "0.4433714", "0.43684018", "0.43653566", "0.43551973", "0.43279555", "0.43261814", "0.43182367", "0.4310659", "0.4307153", "0.43059516", "0.42891562", "0.42612043", "0.42548746" ]
0.65204585
0
Test basic mapping properties for each item type
def test_type_mapping(registry, item_type): with mappings_use_nested(False): mapping = type_mapping(registry[TYPES], item_type) assert mapping assert 'properties' in mapping if item_type == 'TestingLinkTargetElasticSearch': assert mapping['properties']['reverse_es'].get('type', 'object') != 'nested' # should not occur here # check calculated properties on objects/arrays of objects are mapped correctly if item_type == 'TestingCalculatedProperties': assert mapping['properties']['nested']['properties']['key']['type'] == 'text' assert mapping['properties']['nested']['properties']['value']['type'] == 'text' assert mapping['properties']['nested']['properties']['keyvalue']['type'] == 'text' assert mapping['properties']['nested2']['properties']['key']['type'] == 'text' assert mapping['properties']['nested2']['properties']['value']['type'] == 'text' assert mapping['properties']['nested2']['properties']['keyvalue']['type'] == 'text'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_items(self):\n self.assertEqual([(\"described_model_type\", self.expected_described_model)], list(self.mapped_model.items()))", "def test_create_mapping_drops_unmappable_properties(registry):\n test_item_type = \"embedding_test\"\n expected_embeds = [\"pattern_property_embed\", \"additional_property_embed\"]\n expected_mapped_property = \"should_be_mapped\"\n mapping = type_mapping(registry[TYPES], test_item_type)\n mapped_properties = mapping.get(\"properties\")\n assert mapped_properties\n for expected_embed in expected_embeds:\n mapped_embed = mapped_properties.get(expected_embed)\n assert mapped_embed\n mapped_embed_properties = mapped_embed.get(\"properties\", {})\n assert len(mapped_embed_properties.keys()) == 1\n assert mapped_embed_properties.get(expected_mapped_property)", "def test_get_item(self):\n self.assertEqual(self.expected_described_model, self.mapped_model[\"described_model_type\"])", "def test_map_field_base_case(self):\n field = 'title'\n mapping = {\n 'type': 'text',\n 'index': True\n }\n\n actual = mapper._map_field(mapping, field)\n expected = {\n 'properties': {\n 'title': {\n 'type': 'text',\n 'index': True\n }\n }\n }\n self.assertEqual(actual, expected)", "def test_types(self):\n field_types = (\n ('clip_id', int), ('created_at', datetime.datetime),\n ('description', str), ('filename', str),\n ('format', smscsv.MediaFormat), ('media_id', int), ('title', str)\n )\n for item in self.items:\n for name, type_ in field_types:\n self.assertIsInstance(getattr(item, name), type_)", "def test_create_mapping_correctly_maps_embeds(registry, item_type):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n type_info = registry[TYPES].by_item_type[item_type]\n schema = type_info.schema\n embeds = add_default_embeds(item_type, registry[TYPES], type_info.embedded_list, schema)\n # assert that all embeds exist in mapping for the given type\n for embed in embeds:\n mapping_pointer = mapping\n split_embed = embed.split('.')\n for idx, split_ in enumerate(split_embed):\n # see if this is last level of embedding- may be a field or object\n if idx == len(split_embed) - 1:\n if 'properties' in mapping_pointer and split_ in mapping_pointer['properties']:\n final_mapping = mapping_pointer['properties']\n else:\n final_mapping = mapping_pointer\n if split_ != '*':\n assert split_ in final_mapping\n else:\n assert 'properties' in final_mapping or final_mapping.get('type') == 'object'\n else:\n assert split_ in mapping_pointer['properties']\n mapping_pointer = mapping_pointer['properties'][split_]", "def test_list_properties(self):\n pass", "def requires_mapping(self):", "def test_map(self):\n\n test_cases = [\n Case(\n description=\"lists of objects\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[\"foo\", \"bar\", \"baz\"],\n ),\n Case(\n description=\"missing argument\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"too many arguments\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[\"title\", \"\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"missing property\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"heading\": \"baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[\"foo\", \"bar\", None],\n ),\n Case(\n description=\"value not an array\",\n val=123,\n args=[\"title\"],\n kwargs={},\n 
expect=FilterValueError,\n ),\n Case(\n description=\"array contains non object\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, 5, []],\n args=[\"title\"],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[\"title\"],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"undefined argument\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=[None, None, None],\n ),\n ]\n\n self._test(Map, test_cases)", "def _empty_mapping(self):\r\n return self.type2test()", "def test_properties_get(self):\n pass", "def testAddingPropertyFields(self):\n map_sheet = self.properties[PROPERTY_SHEET]\n for key, value in PROPS.items():\n self.failUnless(map_sheet.hasProperty(key) and list(map_sheet.getProperty(key)) == value)", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {('physical_location', 'ANL'), ('has_physical_specimen', True),\n ('has_extracted_data', True), ('sample_type', 'ENVO:soil'),\n ('required_sample_info_status', 'completed'),\n ('collection_timestamp', datetime(2011, 11, 11, 13, 00, 00)),\n ('host_subject_id', '1001:M7'),\n ('description', 'Cannabis Soil Microbiome'),\n ('season_environment', 'winter'), ('assigned_from_geo', 'n'),\n ('texture', '64.6 sand, 17.6 silt, 17.8 clay'),\n ('taxon_id', '1118232'), ('depth', 0.15),\n ('host_taxid', '3483'), ('common_name', 'root metagenome'),\n ('water_content_soil', 0.164), ('elevation', 114), ('temp', 15),\n ('tot_nitro', 1.41), ('samp_salinity', 7.15), ('altitude', 0),\n ('env_biome',\n 'ENVO:Temperate grasslands, savannas, and shrubland biome'),\n ('country', 'GAZ:United States of America'), ('ph', 6.94),\n ('anonymized_name', 'SKB8'), ('tot_org_carb', 5),\n ('description_duplicate', 'Burmese root'),\n ('env_feature', 'ENVO:plant-associated habitat'),\n ('latitude', 74.0894932572),\n ('longitude', 65.3283470202)}\n self.assertEqual(set(obs), exp)", "def test_values(self):\n self.assertEqual([self.expected_described_model], list(self.mapped_model.values()))", "def test_get(self):\n self.assertEqual(self.expected_described_model, self.mapped_model.get(\"described_model_type\"))", "def test_mapping_types(self):\n table_schema = {\n 'my_bigint': sql_types.BIGINT(),\n 'my_boolean': sql_types.BOOLEAN(),\n 'my_char': sql_types.CHAR(16),\n 'my_clob': sql_types.CLOB(),\n 'my_date': sql_types.DATE(),\n 'my_datetime': sql_types.DATETIME(),\n 'my_decimal': sql_types.DECIMAL(10, 5),\n 'my_float': sql_types.FLOAT(),\n 'my_integer': sql_types.INTEGER(),\n 'my_nchar': sql_types.NCHAR(16),\n 'my_nvarchar': sql_types.NVARCHAR(16),\n 'my_null': sql_types.NullType(),\n 'my_numeric': sql_types.NUMERIC(),\n 'my_real': sql_types.REAL(),\n 'my_smallint': sql_types.SMALLINT(),\n 'my_text': sql_types.TEXT(),\n 'my_timestamp': sql_types.TIMESTAMP(),\n 'my_varchar': sql_types.VARCHAR(16),\n }\n document_type = 'some_document_type'\n mapping = Mapping(document_type, table_schema)\n self.assertDictEqual(\n mapping.mapping,\n {\n document_type: {\n 'properties': {\n '_metadata': {\n 'type': 'object',\n 'index': 'no',\n 'properties': {\n 'filename': {\n 'type': 'string',\n 'index': 'no',\n },\n 'table': {\n 'type': 'string',\n 'index': 'no',\n },\n },\n },\n 'my_bigint': {'type': 'long'},\n 'my_boolean': {'type': 'boolean'},\n 'my_char': {'type': 'string'},\n 'my_clob': {'type': 'string'},\n 'my_datetime': {'type': 'date'},\n 'my_float': {'type': 
'float'},\n 'my_integer': {'type': 'long'},\n 'my_nchar': {'type': 'string'},\n 'my_nvarchar': {'type': 'string'},\n 'my_real': {'type': 'double'},\n 'my_smallint': {'type': 'integer'},\n 'my_text': {'type': 'string'},\n 'my_timestamp': {'type': 'date'},\n 'my_varchar': {'type': 'string'},\n },\n },\n },\n )", "def test_core_functionality(self):\n # Test typing\n self.run_map_collection(\n _map_collection=self.example_map\n )", "def test_init(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType(**data)\n for key, value in data.items():\n assert getattr(observation_type, key) == value", "def test_basic_types(self):\n\t\tyield self.check_setget(\"a_string\", \"some random string\")\n\t\tyield self.check_setget(\"an_integer\", 42)\n\t\tyield self.check_setget(\"a_long\", long(1<<30))\n\t\tyield self.check_setget(\"a_dict\", { \"foo\" : \"bar\", \"baz\" : \"quux\" })", "def test_apply_scalar_map(self):\n super(TestObjDict, self).test_apply_scalar_map(_as_obj=True)", "def test_check_map(self):\r\n\r\n header, mapping_data = check_map(self.valid_mapping_data_golay)\r\n\r\n expected_header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n expected_mapping_data =\\\r\n [['s1', 'AACTCGTCGATG', 'ATTCGATART', 's1_description'],\r\n ['s2', 'agcAGCACTTGT', 'ATTCGATART', 's2_description'],\r\n ['s3', 'ACCGCAGAGTCA', 'YATGCTGCCTCCCGTAGGAGT', 's3_description']]\r\n\r\n self.assertEquals(header, expected_header)\r\n self.assertEquals(mapping_data, expected_mapping_data)", "def test_type_mapping_nested(registry):\n with mappings_use_nested(True):\n mapping = type_mapping(registry[TYPES], 'TestingLinkTargetElasticSearch')\n assert mapping\n assert 'properties' in mapping\n # if type is defined on this field, it should beg object, NOT nested since it is not enabled on this field\n assert mapping['properties']['reverse_es'].get('type', 'object') == 'object'", "def test_metadata_fonts_items_dicts(self):\n for x in self.metadata.get('fonts', None):\n self.assertEqual(type(x), type({}), msg=\"type(%s) is not dict\" % x)", "def test_get_types(self):\n pass", "def test_queryable_fields_map(self):\n # Just check one of them and make sure it worked.\n position_schema = MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP[\n MELTED_SCHEMA_KEY__POSITION]\n self.assertEquals(position_schema['type'], 'Integer')", "def _do_mapping(self):\n pass", "def test_map_basics(self):\n self.assertDigitizerMapBasics(self.map, self.dgroup)", "def test_mapper_func(data, schema):\n pass", "def testabilities(self):\n for ability in AmuletAbility.typelist:\n a = AmuletAbility(ability)\n self.assertEqual(a.type, ability)\n if ability != 'Attribute':\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def test_getitem_required(self):\n self.assertEqual(self.tester['physical_location'], 'ANL')\n self.assertEqual(self.tester['collection_timestamp'],\n datetime(2011, 11, 11, 13, 00, 00))\n self.assertTrue(self.tester['has_physical_specimen'])" ]
[ "0.72993106", "0.717978", "0.687299", "0.67445046", "0.67349356", "0.6609082", "0.63543266", "0.6196516", "0.6173583", "0.6149421", "0.61348146", "0.6124618", "0.6093486", "0.60665125", "0.6055508", "0.6018713", "0.5910689", "0.58788294", "0.5799283", "0.5745749", "0.5725811", "0.568587", "0.5679048", "0.5665233", "0.56614774", "0.5658917", "0.5641021", "0.5639311", "0.56298393", "0.5624259" ]
0.76476127
0
Tests that mapping a field with a list of dicts in it maps with type=nested only if told to do so on the schema. For this case it is not specified, so if object is expected.
def test_type_mapping_nested(registry): with mappings_use_nested(True): mapping = type_mapping(registry[TYPES], 'TestingLinkTargetElasticSearch') assert mapping assert 'properties' in mapping # if type is defined on this field, it should beg object, NOT nested since it is not enabled on this field assert mapping['properties']['reverse_es'].get('type', 'object') == 'object'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_type_mapping_nested_with_disabled_parameter(registry):\n with mappings_use_nested(True):\n mapping = type_mapping(registry[TYPES], 'TestingNestedEnabled')\n assert mapping\n assert 'properties' in mapping\n assert mapping['properties']['object_options'].get('type', 'object') != 'nested' # neither enabled\n assert mapping['properties']['disabled_array_of_objects_in_calc_prop'].get('type', 'object') != 'nested'\n assert mapping['properties']['enabled_array_of_objects_in_calc_prop']['type'] == 'nested' # enabled\n assert mapping['properties']['nested_options']['type'] == 'nested' # enabled", "def test_nested():\n # pylint: disable=no-member\n assert issubclass(NestedSchema, graphene.ObjectType)\n assert isinstance(NestedSchema.name, graphene.String)\n assert isinstance(NestedSchema.leaf, graphene.Field)\n assert str(NestedSchema.leaf.type) == \"Leaf\"\n assert isinstance(NestedSchema.leaf.type.value, graphene.String)\n assert isinstance(NestedSchema.leaf.type.leaflets, graphene.List)", "def test_oneof_variable_dict_or_list():\n\n class DataMap(ce.ExtendedMappingSchema):\n field = ce.ExtendedSchemaNode(ce.ExtendedInteger())\n\n class DataItem(DataMap):\n id = ce.ExtendedSchemaNode(ce.ExtendedString())\n\n class DataSeq(ce.ExtendedSequenceSchema):\n item = DataItem()\n\n class DataVarMap(ce.ExtendedMappingSchema):\n var_id = DataMap(variable=\"<var_id>\")\n\n class DataOneOf(ce.OneOfKeywordSchema):\n _one_of = [DataVarMap, DataSeq]\n\n class DataMapDrop(ce.ExtendedMappingSchema):\n field = ce.ExtendedSchemaNode(ce.ExtendedInteger(), missing=colander.drop)\n\n class DataItemDrop(DataMapDrop):\n id = ce.ExtendedSchemaNode(ce.ExtendedString())\n\n class DataSeqDrop(ce.ExtendedSequenceSchema):\n item = DataItemDrop()\n\n class DataVarMapDrop(ce.ExtendedMappingSchema):\n var_id = DataMapDrop(variable=\"<var_id>\")\n\n class DataOneOfDrop(ce.OneOfKeywordSchema):\n _one_of = [DataVarMapDrop, DataSeqDrop]\n\n valid_map = {\"id-1\": {\"field\": 1}, \"id-2\": {\"field\": 2}}\n valid_list = [{\"id\": \"id-1\", \"field\": 1}, {\"id\": \"id-2\", \"field\": 2}]\n\n evaluate_test_cases([\n (DataOneOf, valid_map, valid_map),\n (DataOneOf, valid_list, valid_list),\n (DataOneOf, {}, colander.Invalid), # missing 'field', so empty is not valid because we check sub-schemas\n (DataOneOf, [], []), # missing 'field'+'id' so empty is not valid\n (DataOneOfDrop, {}, colander.Invalid), # valid now because 'field' can be omitted\n (DataOneOfDrop, [], []), # valid because empty list is allowed\n (DataOneOf(default={}), \"bad-format\", colander.Invalid), # not drop, default only if not provided\n (DataOneOf(default={}), None, colander.Invalid), # value 'None' (JSON 'null') is still \"providing\" the field\n (DataOneOf(missing=colander.drop), \"bad-format\", colander.drop), # would be dropped by higher level schema\n (DataOneOf(default={}, missing=colander.drop), colander.null, {}), # result if value not \"provided\" use default\n (DataOneOfDrop(default={}), colander.null, {}), # value not provided uses default\n (DataOneOf, {\"id-1\": {\"field\": \"ok\"}, \"id-2\": {\"field\": \"123\"}}, colander.Invalid),\n (DataOneOf, [{\"id\": 1, \"field\": \"ok\"}, {\"id\": \"id-2\", \"field\": 123}], colander.Invalid),\n (DataOneOf, {\"id-1\": [1, 2, 3]}, colander.Invalid),\n (DataOneOf, [{\"id\": \"id-1\"}], colander.Invalid),\n ])", "def check_if_nested(data):\n if isinstance(data, dict):\n for k in data:\n if isinstance(data[k], (list, dict)):\n return True\n elif isinstance(data, list):\n for i in data:\n if 
isinstance(i, (list, dict)):\n return True\n return False", "def _validate_nested_list_type(self, name, obj, nested_level, *args):\n if nested_level <= 1:\n self._validate_list_type(name, obj, *args)\n else:\n if obj is None:\n return\n if not isinstance(obj, list):\n raise TypeError(self.__class__.__name__ + '.' + name + ' contains value of type ' +\n type(obj).__name__ + ' where a list is expected')\n for sub_obj in obj:\n self._validate_nested_list_type(name, sub_obj, nested_level - 1, *args)", "def is_nullable_doc(doc: Dict[str, Any], field_path: Tuple) -> bool:\n\n field = field_path[0]\n\n # if field is inside\n if field in doc:\n\n value = doc[field]\n\n if value is None:\n return True\n\n # if no fields left, must be non-nullable\n if len(field_path) == 1:\n return False\n\n # otherwise, keep checking the nested fields\n remaining_fields = field_path[1:]\n\n # if dictionary, check additional level of nesting\n if isinstance(value, dict):\n return is_nullable_doc(doc[field], remaining_fields)\n\n # if list, check if any member is missing field\n if isinstance(value, list):\n\n # count empty lists of nested objects as nullable\n if len(value) == 0:\n return True\n\n return any(is_nullable_doc(x, remaining_fields) for x in doc[field])\n\n # any other types to check?\n # raise ValueError(\"Nested type not 'list' or 'dict' encountered\")\n return True\n\n return True", "def test_list_instead_of_dict() -> None:\n dict_ = {\n \"resourceType\": \"Observation\",\n \"status\": \"final\",\n \"code\": {\"coding\": [{\"system\": \"test\", \"code\": \"test\"}]},\n }\n subject = { # Dictionary -> OK\n \"reference\": \"Patient/475\",\n \"display\": \"REF\",\n }\n dict_[\"subject\"] = subject\n r4.from_dict(dict_)\n with pytest.raises(pydantic.ValidationError):\n dict_[\"subject\"] = [subject] # As a list -> not expected\n r4.from_dict(dict_)", "def test_nested_list_arg(self):\r\n myNestedType = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)), 3)\r\n\r\n myType = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n\r\n myManualNestedType = TypedListType(TypedListType(\r\n TypedListType(myType)))\r\n\r\n self.assertTrue(myNestedType == myManualNestedType)", "def test_type_mapping(registry, item_type):\n with mappings_use_nested(False):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n assert 'properties' in mapping\n if item_type == 'TestingLinkTargetElasticSearch':\n assert mapping['properties']['reverse_es'].get('type', 'object') != 'nested' # should not occur here\n\n # check calculated properties on objects/arrays of objects are mapped correctly\n if item_type == 'TestingCalculatedProperties':\n assert mapping['properties']['nested']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['keyvalue']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['keyvalue']['type'] == 'text'", "def test_map_field_recursive_case(self):\n field = 'content.title'\n mapping = {\n 'type': 'text',\n 'index': True\n }\n\n actual = mapper._map_field(mapping, field)\n expected = {\n 'properties': {\n 'content': {\n 'properties': {\n 'title': {\n 'type': 'text',\n 'index': True\n }\n }\n }\n }\n }\n self.assertEqual(actual, expected)", "def 
nested_lookup(doc, field):\n value = doc\n keys = field.split(\".\")\n try:\n for k in keys:\n if isinstance(value, (list, tuple)):\n # assuming we have a list of dict with k as one of the keys\n stype = set([type(e) for e in value])\n if not stype:\n return None\n assert len(stype) == 1 and stype == {dict}, \"Expecting a list of dict, found types: %s\" % stype\n value = [e[k] for e in value if e.get(k)]\n # can't go further ?\n return value\n else:\n value = value[k]\n except KeyError:\n return None\n\n return value", "def test_issue_114(asserter):\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"a\": {\n \"type\": \"array\",\n \"items\": {\n \"b\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n value = {\"a\": []}\n expected = value\n asserter(schema, value, expected)", "def test_init_with_nested_dicts(self):\n regex = 'mappings can not be nested'\n with self.assertRaisesRegex(ValueError, regex):\n query = DataQuery({'A': {'B': 'C'}}, D='x')", "def test__is_many_not_a_list(self):\n is_many = BaseResource._is_many(dict())\n self.assertFalse(is_many)", "def dict_type(verifield, required):\n if verifield is None: return True\n if not isinstance(verifield, dict): return False\n all_of = [value or True for value in verifield.values() if isinstance(value, required) or value is None]\n return not verifield or (all(all_of or [False]) and len(all_of) == len(verifield))", "def check_types(dict_):\n if dict_['UNTREATED']['types'] != dict_['TREATED']['types']:\n for i in range(len(dict_['UNTREATED']['types'])):\n if isinstance(dict_['TREATED']['types'][i], list):\n dict_['UNTREATED']['types'][i] = dict_['TREATED']['types'][i]\n if isinstance(dict_['UNTREATED']['types'][i], list):\n dict_['TREATED']['types'][i] = dict_['UNTREATED']['types'][i]\n\n return dict_", "def test_oneof_optional_default_with_nested_required():\n class MappingSchema(ce.ExtendedMappingSchema):\n value = ce.ExtendedSchemaNode(ce.ExtendedInteger()) # strict int, no auto convert to str\n\n class OneOfDifferentNested(ce.OneOfKeywordSchema):\n _one_of = [\n ce.ExtendedSchemaNode(ce.ExtendedString()), # strict string, no auto convert from int\n MappingSchema()\n ]\n\n class OneOfRequiredDefaultStr(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default=\"1\") # match first schema of OneOf\n\n class OneOfRequiredDefaultMap(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default={\"value\": 1}) # match second schema of OneOf\n\n class OneOfMissingDropDefaultStr(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default=\"1\", missing=colander.drop)\n\n class OneOfMissingDropDefaultMap(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default={\"value\": 1}, missing=colander.drop)\n\n class OneOfMissingNullDefaultStr(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default=\"1\", missing=colander.null)\n\n class OneOfMissingNullDefaultMap(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default={\"value\": 1}, missing=colander.null)\n\n class OneOfMissingNullDefaultNull(ce.ExtendedMappingSchema):\n field = OneOfDifferentNested(default=colander.null, missing=colander.null)\n\n evaluate_test_cases([\n (OneOfRequiredDefaultStr, {}, {\"field\": \"1\"}),\n (OneOfRequiredDefaultStr, None, colander.Invalid), # oneOf itself is required\n (OneOfRequiredDefaultStr, {\"field\": True}, colander.Invalid), # raise because provided is wrong format\n (OneOfRequiredDefaultStr, {\"field\": {}}, colander.Invalid),\n (OneOfRequiredDefaultStr, {\"field\": {\"value\": \"1\"}}, 
colander.Invalid),\n (OneOfRequiredDefaultStr, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n (OneOfMissingDropDefaultStr, {\"field\": True}, {}),\n (OneOfMissingDropDefaultStr, {\"field\": 1}, {}),\n (OneOfMissingNullDefaultStr, {}, {\"field\": \"1\"}),\n (OneOfMissingNullDefaultStr, {\"field\": True}, colander.Invalid),\n (OneOfMissingNullDefaultStr, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfMissingNullDefaultStr, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n (OneOfRequiredDefaultMap, {}, {\"field\": {\"value\": 1}}), # default\n (OneOfRequiredDefaultMap, None, colander.Invalid),\n (OneOfRequiredDefaultMap, {\"field\": True}, colander.Invalid),\n (OneOfRequiredDefaultMap, {\"field\": {}}, colander.Invalid),\n (OneOfRequiredDefaultMap, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfRequiredDefaultMap, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n (OneOfRequiredDefaultMap, {}, {\"field\": {\"value\": 1}}), # default\n (OneOfMissingDropDefaultMap, {\"field\": True}, {}),\n (OneOfMissingDropDefaultMap, {\"field\": 1}, {}),\n (OneOfMissingNullDefaultMap, {}, {\"field\": {\"value\": 1}}),\n (OneOfMissingNullDefaultMap, {\"field\": True}, colander.Invalid),\n (OneOfMissingNullDefaultMap, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfMissingNullDefaultMap, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n (OneOfMissingNullDefaultNull, {}, {}),\n (OneOfMissingNullDefaultNull, {\"field\": True}, colander.Invalid),\n (OneOfMissingNullDefaultNull, {\"field\": {\"value\": \"1\"}}, colander.Invalid),\n (OneOfMissingNullDefaultNull, {\"field\": \"1\"}, {\"field\": \"1\"}),\n (OneOfMissingNullDefaultNull, {\"field\": {\"value\": 1}}, {\"field\": {\"value\": 1}}),\n ])", "def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))", "def _get_nested(nested_dict, field):\n print(nested_dict, field)\n keys = field.split('.')\n current = nested_dict\n for k in keys:\n print('key', k, 'current', current)\n # return None for nested fields without a value in this doc\n if isinstance(current, list):\n # this list could contain anything. 
skip objects not containing `k`.\n return [x[k] for x in current if x.get(k) is not None]\n if not k in current:\n current = None\n break\n current = current[k]\n return current", "def _map_onto(self, field_struct, value, options=None):\n if isinstance(value, list):\n # Fill 'repeated' structure\n # a.b = [1, 2]\n # a.b.add() = 1\n # a.b.add() = 2\n for sub in value:\n if hasattr(field_struct, \"add\"):\n nested = field_struct.add()\n # Composite lists will never\n # need to be set by us\n self._map_onto(nested, sub)\n elif hasattr(field_struct, 'append'):\n # Scalar lists will always\n # need to be set by us\n field_struct.append(self._process_value(sub))\n if options:\n self._check_field_length(field_struct, sub, options)\n else:\n raise FieldWrongTypeException(\"Tried to map illegal structure \" +\n str(value) +\n \" onto an object/message field.\")\n elif isinstance(value, dict):\n # Fill message structure\n # a.b = {c: 1, d: 2}\n # a.b.c = 1\n # a.b.d = 2\n for key in value:\n nested = getattr(field_struct, key)\n r = self._map_onto(nested, value[key], self._get_options(field_struct, key))\n if r:\n self._checked_set(field_struct, key, r[0])\n elif isinstance(value, tuple):\n # Fill message structure (in order)\n # a.b = (1, 2)\n # a.b.c = 1\n # a.b.d = 2\n if not hasattr(field_struct, 'DESCRIPTOR'):\n raise FieldWrongTypeException(\"Tried to map illegal structure \" +\n str(value) +\n \" onto a list/repeated field.\")\n fields = field_struct.DESCRIPTOR.fields\n for i in range(len(value)):\n nested = getattr(field_struct, fields[i].name)\n r = self._map_onto(nested, value[i], self._get_options(field_struct, fields[i].name))\n if r:\n self._checked_set(field_struct, fields[i].name, r[0])\n else:\n return [self._process_value(value), ]", "def _assert_valid_deep(value):\n if isinstance(value, dict):\n for v in value.itervalues():\n _assert_valid_deep(v)\n elif isinstance(value, list):\n for v in value:\n _assert_valid_deep(v)\n else:\n if hasattr(value, \"assert_valid\"):\n value.assert_valid()", "def list_type(verifield, required):\n if verifield is None: return True\n if not isinstance(verifield, list): return False\n all_of = [value or True for value in verifield if isinstance(value, required) or value is None]\n return not verifield or (all(all_of or [False]) and len(all_of) == len(verifield))", "def test_list_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_list()\r\n\r\n assert isinstance(nested, list)\r\n assert nested[0] == None\r\n assert nested[1] == 0\r\n assert nested[2] == 1\r\n\r\n assert isinstance(nested[3], list)\r\n assert nested[3][0] == 2\r\n assert nested[3][1] == original\r\n assert nested[3][2] == 3\r\n\r\n assert nested[4] == 5", "def testCheck(self):\r\n from pydsl.Grammar.Definition import JsonSchema\r\n from pydsl.Check import JsonSchemaChecker\r\n schema = {\r\n \"type\" : \"string\",\r\n \"items\" : {\r\n \"type\" : [\"string\", \"object\"],\r\n \"properties\" : {\r\n \"foo\" : {\"enum\" : [1, 3]},\r\n #\"bar\" : { #See https://github.com/Julian/jsonschema/issues/89\r\n # \"type\" : \"array\",\r\n # \"properties\" : {\r\n # \"bar\" : {\"required\" : True},\r\n # \"baz\" : {\"minItems\" : 2},\r\n # }\r\n #}\r\n }\r\n }\r\n }\r\n grammardef = JsonSchema(schema)\r\n checker = JsonSchemaChecker(grammardef)\r\n self.assertTrue(checker.check(\"a\"))\r\n self.assertFalse(checker.check([1, {\"foo\" : 2, \"bar\" : {\"baz\" : [1]}}, \"quux\"]))", "def testSerializer(data):\n deserializer = 
TypeDeserializer()\n if isinstance(data, list):\n return [deserializer.deserialize(v) for v in data]\n if isinstance(data, dict):\n try:\n return deserializer.deserialize(data)\n except TypeError:\n return {k: deserializer.deserialize(v) for k, v in data.items()}\n else:\n return data", "def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True", "def quacks_like_dict(object):\n return isinstance(object, Mapping)", "def test_list_field():", "def test_embedded_json(self):\n json_data = '{\"a\": {\"b\" : true } }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a.b\" : true}'))", "def test_list_4f(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2']),\n JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4},{\"test1\":3, \"test23\":4}]')\n self.assertFalse(check_json_array(jdic, jobj))" ]
[ "0.65566456", "0.6533364", "0.64874804", "0.63195527", "0.61757094", "0.61565745", "0.5889713", "0.5874048", "0.58668673", "0.58391446", "0.58335656", "0.5819415", "0.58100975", "0.5807514", "0.57962775", "0.5795738", "0.57222366", "0.5711259", "0.570308", "0.56359315", "0.5631693", "0.55610186", "0.5551981", "0.5549525", "0.554588", "0.5543887", "0.5540135", "0.553919", "0.5536169", "0.5532761" ]
0.66391444
0
Tests that mapping a type with an object field with nested enabled correctly maps with nested.
def test_type_mapping_nested_with_disabled_parameter(registry):
    with mappings_use_nested(True):
        mapping = type_mapping(registry[TYPES], 'TestingNestedEnabled')
        assert mapping
        assert 'properties' in mapping
        assert mapping['properties']['object_options'].get('type', 'object') != 'nested'  # neither enabled
        assert mapping['properties']['disabled_array_of_objects_in_calc_prop'].get('type', 'object') != 'nested'
        assert mapping['properties']['enabled_array_of_objects_in_calc_prop']['type'] == 'nested'  # enabled
        assert mapping['properties']['nested_options']['type'] == 'nested'  # enabled
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_type_mapping_nested(registry):\n with mappings_use_nested(True):\n mapping = type_mapping(registry[TYPES], 'TestingLinkTargetElasticSearch')\n assert mapping\n assert 'properties' in mapping\n # if type is defined on this field, it should beg object, NOT nested since it is not enabled on this field\n assert mapping['properties']['reverse_es'].get('type', 'object') == 'object'", "def test_type_mapping(registry, item_type):\n with mappings_use_nested(False):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n assert 'properties' in mapping\n if item_type == 'TestingLinkTargetElasticSearch':\n assert mapping['properties']['reverse_es'].get('type', 'object') != 'nested' # should not occur here\n\n # check calculated properties on objects/arrays of objects are mapped correctly\n if item_type == 'TestingCalculatedProperties':\n assert mapping['properties']['nested']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['keyvalue']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['keyvalue']['type'] == 'text'", "def test_map_field_recursive_case(self):\n field = 'content.title'\n mapping = {\n 'type': 'text',\n 'index': True\n }\n\n actual = mapper._map_field(mapping, field)\n expected = {\n 'properties': {\n 'content': {\n 'properties': {\n 'title': {\n 'type': 'text',\n 'index': True\n }\n }\n }\n }\n }\n self.assertEqual(actual, expected)", "def test_nested():\n # pylint: disable=no-member\n assert issubclass(NestedSchema, graphene.ObjectType)\n assert isinstance(NestedSchema.name, graphene.String)\n assert isinstance(NestedSchema.leaf, graphene.Field)\n assert str(NestedSchema.leaf.type) == \"Leaf\"\n assert isinstance(NestedSchema.leaf.type.value, graphene.String)\n assert isinstance(NestedSchema.leaf.type.leaflets, graphene.List)", "def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))", "def test_transform_object(self):\n # Test object with nested \"international\" fields\n obj1 = {\n \"international\": {\n \"display_name\": {\n \"af\": \"Dokumentbestuurstelsel\",\n \"fr\": \"type de logiciel\",\n \"ro\": \"colecție organizată a documentelor\",\n }\n }\n }\n transform_object(obj1, \"international\")\n self.assertDictEqual(\n {\n \"international\": {\n \"display_name\": {\n \"keys\": [\"af\", \"fr\", \"ro\"],\n \"values\": [\n \"Dokumentbestuurstelsel\",\n \"type de logiciel\",\n \"colecție organizată \" \"a documentelor\",\n ],\n }\n }\n },\n obj1,\n )\n\n # Test object with nested \"international\" none\n obj2 = {\"international\": {\"display_name\": None}}\n transform_object(obj2, \"international\")\n self.assertDictEqual({\"international\": {\"display_name\": None}}, obj2)\n\n # Test object with nested \"abstract_inverted_index\" fields\n obj3 = {\n \"abstract_inverted_index\": {\n \"Malignant\": [0],\n \"hyperthermia\": [1],\n \"susceptibility\": [2],\n \"(MHS)\": [3],\n \"is\": [4, 6],\n \"primarily\": [5],\n }\n }\n transform_object(obj3, \"abstract_inverted_index\")\n self.assertDictEqual(\n {\n \"abstract_inverted_index\": {\n 
\"keys\": [\"Malignant\", \"hyperthermia\", \"susceptibility\", \"(MHS)\", \"is\", \"primarily\"],\n \"values\": [\"0\", \"1\", \"2\", \"3\", \"4, 6\", \"5\"],\n }\n },\n obj3,\n )\n\n # Test object with nested \"abstract_inverted_index\" none\n obj4 = {\"abstract_inverted_index\": None}\n transform_object(obj4, \"abstract_inverted_index\")\n self.assertDictEqual({\"abstract_inverted_index\": None}, obj4)", "def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))", "def test_init_with_nested_dicts(self):\n regex = 'mappings can not be nested'\n with self.assertRaisesRegex(ValueError, regex):\n query = DataQuery({'A': {'B': 'C'}}, D='x')", "def test_roundtrip_nested_user_defined_nested_map():\n club = {\n \"members\": [\n dict(name=\"Bede\", age=20),\n dict(name=\"Jake\", age=21),\n dict(name=\"Cal\", age=22)\n ],\n \"name\": \"The Kool Kids Klub\"\n }\n Club = Map.from_file(\"definitions/Club.buf\")\n assert club == Club.read(bytes(Club.to_bytes(club)))", "def test_map_field_base_case(self):\n field = 'title'\n mapping = {\n 'type': 'text',\n 'index': True\n }\n\n actual = mapper._map_field(mapping, field)\n expected = {\n 'properties': {\n 'title': {\n 'type': 'text',\n 'index': True\n }\n }\n }\n self.assertEqual(actual, expected)", "def test_type_builder_handles_nested_properties():\n schema = [\n SchemaObject(\n name=\"ClassWithNestedClass\",\n properties=[\n SchemaObject(\n name=\"nestedValue\",\n properties=[\n SchemaValue(name=\"string_value\", value_type=\"string\"),\n SchemaEnum(\n name=\"enum_value\",\n value_type=\"string\",\n values=[\"hey\", \"new\", \"value\"],\n ),\n ],\n ),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 3\n assert build_result[0] == ClassDefinition(\n name=\"ClassWithNestedClass\",\n properties=[\n PropertyDefinition(\n name=\"nested_value\",\n key=\"nestedValue\",\n value_type=\"ClassWithNestedClassNestedValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithNestedClassNestedValue\"},\n )\n assert build_result[1] == ClassDefinition(\n name=\"ClassWithNestedClassNestedValue\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"string_value\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"enum_value\",\n key=\"enum_value\",\n value_type=\"ClassWithNestedClassNestedValueEnumValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithNestedClassNestedValueEnumValue\"},\n )\n assert build_result[2] == EnumDefinition(\n name=\"ClassWithNestedClassNestedValueEnumValue\",\n values=[(\"HEY\", \"hey\"), (\"NEW\", \"new\"), (\"VALUE\", \"value\")],\n depends_on=set(),\n )", "def test_create_mapping_correctly_maps_embeds(registry, item_type):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n type_info = registry[TYPES].by_item_type[item_type]\n schema = type_info.schema\n embeds = add_default_embeds(item_type, registry[TYPES], type_info.embedded_list, schema)\n # assert that all embeds exist in mapping for the given type\n for embed in embeds:\n mapping_pointer = mapping\n split_embed = embed.split('.')\n for idx, split_ in enumerate(split_embed):\n # see if this is last level of embedding- may be a field or object\n if idx == len(split_embed) - 1:\n if 'properties' in mapping_pointer and split_ in mapping_pointer['properties']:\n 
final_mapping = mapping_pointer['properties']\n else:\n final_mapping = mapping_pointer\n if split_ != '*':\n assert split_ in final_mapping\n else:\n assert 'properties' in final_mapping or final_mapping.get('type') == 'object'\n else:\n assert split_ in mapping_pointer['properties']\n mapping_pointer = mapping_pointer['properties'][split_]", "def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)", "def testInitializeNestedFieldFromDict(self):\n class SimpleMessage(messages.Message):\n required = messages.IntegerField(1, required=True)\n\n class NestedMessage(messages.Message):\n simple = messages.MessageField(SimpleMessage, 1)\n\n class RepeatedMessage(messages.Message):\n simple = messages.MessageField(SimpleMessage, 1, repeated=True)\n\n nested_message1 = NestedMessage(simple={'required': 10})\n self.assertTrue(nested_message1.is_initialized())\n self.assertTrue(nested_message1.simple.is_initialized())\n\n nested_message2 = NestedMessage()\n nested_message2.simple = {'required': 10}\n self.assertTrue(nested_message2.is_initialized())\n self.assertTrue(nested_message2.simple.is_initialized())\n\n repeated_values = [{}, {'required': 10}, SimpleMessage(required=20)]\n\n repeated_message1 = RepeatedMessage(simple=repeated_values)\n self.assertEquals(3, len(repeated_message1.simple))\n self.assertFalse(repeated_message1.is_initialized())\n\n repeated_message1.simple[0].required = 0\n self.assertTrue(repeated_message1.is_initialized())\n\n repeated_message2 = RepeatedMessage()\n repeated_message2.simple = repeated_values\n self.assertEquals(3, len(repeated_message2.simple))\n self.assertFalse(repeated_message2.is_initialized())\n\n repeated_message2.simple[0].required = 0\n self.assertTrue(repeated_message2.is_initialized())", "def test_roundtrip_nested_map():\n Person = Map(\n MapEntrySpec(1, \"name\", String),\n MapEntrySpec(2, \"age\", UnsignedInt),\n \"Person\"\n )\n Family = Map(\n MapEntrySpec(1, \"mother\", Person),\n MapEntrySpec(2, \"father\", Person),\n \"Family\"\n )\n\n my_family = {\n \"mother\": {\n \"name\": \"Helen\",\n \"age\": 62\n },\n \"father\": {\n \"name\": \"Mark\",\n \"age\": 65\n }\n }\n\n roundtripped_family = Family.read(Family.to_bytes(my_family))\n assert my_family == roundtripped_family", "def test_nested(cls, value, res):\n\tobj = cls(value, DEFAULT_POD)\n\tassert obj == res", "def test_nested_dict(self):\n nested = self.TEI.nested_dict(exclude=[\"tei:note\"])\n self.assertEqual(nested[\"1\"][\"pr\"][\"1\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Check that dictionary path is well done\")\n self.assertEqual(nested[\"1\"][\"12\"][\"1\"], \"Itur ad Herculeas gelidi qua Tiburis arces \",\n \"Check that dictionary path works on more than one passage\")\n self.assertEqual(nested[\"2\"][\"pr\"][\"1\"], \"'Quid nobis' inquis 'cum epistula? parum enim tibi \",\n \"Check that different fist level works as well\")\n self.assertEqual(nested[\"1\"][\"3\"][\"8\"], \"Ibis ab excusso missus in astra sago. 
\",\n \"Check that notes are removed \")\n self.assertEqual(\n [list(nested.keys()), list(nested[\"1\"].keys())[:3], list(nested[\"2\"][\"pr\"].keys())[:3]],\n [[\"1\", \"2\"], [\"pr\", \"1\", \"2\"], [\"sa\", \"1\", \"2\"]],\n \"Ensure that text keeps its order\")", "def test_mapping_types(self):\n table_schema = {\n 'my_bigint': sql_types.BIGINT(),\n 'my_boolean': sql_types.BOOLEAN(),\n 'my_char': sql_types.CHAR(16),\n 'my_clob': sql_types.CLOB(),\n 'my_date': sql_types.DATE(),\n 'my_datetime': sql_types.DATETIME(),\n 'my_decimal': sql_types.DECIMAL(10, 5),\n 'my_float': sql_types.FLOAT(),\n 'my_integer': sql_types.INTEGER(),\n 'my_nchar': sql_types.NCHAR(16),\n 'my_nvarchar': sql_types.NVARCHAR(16),\n 'my_null': sql_types.NullType(),\n 'my_numeric': sql_types.NUMERIC(),\n 'my_real': sql_types.REAL(),\n 'my_smallint': sql_types.SMALLINT(),\n 'my_text': sql_types.TEXT(),\n 'my_timestamp': sql_types.TIMESTAMP(),\n 'my_varchar': sql_types.VARCHAR(16),\n }\n document_type = 'some_document_type'\n mapping = Mapping(document_type, table_schema)\n self.assertDictEqual(\n mapping.mapping,\n {\n document_type: {\n 'properties': {\n '_metadata': {\n 'type': 'object',\n 'index': 'no',\n 'properties': {\n 'filename': {\n 'type': 'string',\n 'index': 'no',\n },\n 'table': {\n 'type': 'string',\n 'index': 'no',\n },\n },\n },\n 'my_bigint': {'type': 'long'},\n 'my_boolean': {'type': 'boolean'},\n 'my_char': {'type': 'string'},\n 'my_clob': {'type': 'string'},\n 'my_datetime': {'type': 'date'},\n 'my_float': {'type': 'float'},\n 'my_integer': {'type': 'long'},\n 'my_nchar': {'type': 'string'},\n 'my_nvarchar': {'type': 'string'},\n 'my_real': {'type': 'double'},\n 'my_smallint': {'type': 'integer'},\n 'my_text': {'type': 'string'},\n 'my_timestamp': {'type': 'date'},\n 'my_varchar': {'type': 'string'},\n },\n },\n },\n )", "def test_add_model_with_resource_fields_with_nested(model_class,):\n pdst = patch_deduce_swagger_type\n pr = patch_registry\n ppd = patch_parse_doc\n pha = patch_hasattr\n\n with pr(), ppd(), patch_isinstance(True) as mock_isinstance:\n with pha() as mock_hasattr, patch_dir([\"resource_fields\"]) as mock_dir:\n with pdst() as mock_deduce_swagger_type:\n\n swagger.add_model(model_class)\n mock_dir.assert_called_with(model_class)\n assert mock_dir.call_count == 2\n mock_hasattr.assert_called_once_with(model_class, \"required\")\n mock_isinstance.assert_called_with(\n model_class, swagger._Nested)\n assert mock_deduce_swagger_type.call_count == len(\n model_class.resource_fields.items()\n )", "def _classify_object_field(field: s_obj.Field[Any]) -> FieldStorage:\n\n ftype = field.type\n shadow_ptr_kind = None\n shadow_ptr_type = None\n fieldtype = FieldType.OTHER\n\n is_array = is_multiprop = False\n if issubclass(ftype, s_obj.MultiPropSet):\n is_multiprop = True\n ftype = ftype.type\n elif (\n issubclass(\n ftype,\n (checked.CheckedList, checked.FrozenCheckedList,\n checked.CheckedSet, checked.FrozenCheckedSet))\n and not issubclass(ftype, s_expr.ExpressionList)\n ):\n is_array = True\n ftype = ftype.type # type: ignore\n\n if issubclass(ftype, s_obj.ObjectCollection):\n ptr_kind = 'multi link'\n ptr_type = 'schema::Object'\n if issubclass(ftype, s_obj.ObjectDict):\n fieldtype = FieldType.OBJ_DICT\n\n elif issubclass(ftype, s_obj.Object):\n ptr_kind = 'link'\n ptr_type = f'schema::{ftype.__name__}'\n\n elif issubclass(ftype, s_expr.Expression):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = 'tuple<text: str, refs: array<uuid>>'\n ptr_kind = 'property'\n ptr_type = 'str'\n fieldtype 
= FieldType.EXPR\n\n elif issubclass(ftype, s_expr.ExpressionList):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = (\n 'array<tuple<text: str, refs: array<uuid>>>'\n )\n ptr_kind = 'property'\n ptr_type = 'array<str>'\n fieldtype = FieldType.EXPR_LIST\n\n elif issubclass(ftype, s_expr.ExpressionDict):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = '''array<tuple<\n name: str,\n expr: tuple<text: str, refs: array<uuid>>\n >>'''\n ptr_kind = 'property'\n ptr_type = 'array<tuple<name: str, expr: str>>'\n fieldtype = FieldType.EXPR_DICT\n\n elif issubclass(ftype, collections.abc.Mapping):\n ptr_kind = 'property'\n ptr_type = 'json'\n\n elif issubclass(ftype, (str, sn.Name)):\n ptr_kind = 'property'\n ptr_type = 'str'\n\n if field.name == 'name':\n # TODO: consider shadow-reflecting names as tuples\n shadow_ptr_kind = 'property'\n shadow_ptr_type = 'str'\n\n elif issubclass(ftype, bool):\n ptr_kind = 'property'\n ptr_type = 'bool'\n\n elif issubclass(ftype, int):\n ptr_kind = 'property'\n ptr_type = 'int64'\n\n elif issubclass(ftype, uuid.UUID):\n ptr_kind = 'property'\n ptr_type = 'uuid'\n\n elif issubclass(ftype, verutils.Version):\n ptr_kind = 'property'\n ptr_type = '''\n tuple<\n major: std::int64,\n minor: std::int64,\n stage: sys::VersionStage,\n stage_no: std::int64,\n local: array<std::str>,\n >\n '''\n else:\n raise RuntimeError(\n f'no metaschema reflection for field {field.name} of type {ftype}'\n )\n\n if is_multiprop:\n ptr_kind = 'multi property'\n if is_array:\n ptr_type = f'array<{ptr_type}>'\n\n return FieldStorage(\n fieldtype=fieldtype,\n ptrkind=ptr_kind,\n ptrtype=ptr_type,\n shadow_ptrkind=shadow_ptr_kind,\n shadow_ptrtype=shadow_ptr_type,\n )", "def set_object_field(self, obj, field, field_val):\n if isinstance(field_val, dict) and field != \"kwargs\":\n sub_obj = getattr(obj, field)\n\n if isinstance(sub_obj, IonObjectBase):\n\n if \"type_\" in field_val and field_val[\"type_\"] != sub_obj.type_:\n if issubtype(field_val[\"type_\"], sub_obj.type_):\n sub_obj = IonObject(field_val[\"type_\"])\n setattr(obj, field, sub_obj)\n else:\n raise Inconsistent(\"Unable to walk the field %s - types don't match: %s %s\" % (\n field, sub_obj.type_, field_val[\"type_\"]))\n\n for sub_field in field_val:\n self.set_object_field(sub_obj, sub_field, field_val.get(sub_field))\n\n elif isinstance(sub_obj, dict):\n setattr(obj, field, field_val)\n\n else:\n for sub_field in field_val:\n self.set_object_field(sub_obj, sub_field, field_val.get(sub_field))\n else:\n # type_ already exists in the class.\n if field != \"type_\":\n setattr(obj, field, field_val)", "def test_embedded_json(self):\n json_data = '{\"a\": {\"b\" : true } }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a.b\" : true}'))", "def quacks_like_dict(object):\n return isinstance(object, Mapping)", "def test_circular_nested(self):\n obj = {}\n obj[\"list\"] = [{\"obj\": obj}]\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)", "def constrained_lens_object_test():\n return # TODO", "def _map_onto(self, field_struct, value, options=None):\n if isinstance(value, list):\n # Fill 'repeated' structure\n # a.b = [1, 2]\n # a.b.add() = 1\n # a.b.add() = 2\n for sub in value:\n if hasattr(field_struct, \"add\"):\n nested = field_struct.add()\n # Composite lists will never\n # need to be set by us\n self._map_onto(nested, sub)\n elif hasattr(field_struct, 'append'):\n # Scalar lists will always\n # need to be set by us\n 
field_struct.append(self._process_value(sub))\n if options:\n self._check_field_length(field_struct, sub, options)\n else:\n raise FieldWrongTypeException(\"Tried to map illegal structure \" +\n str(value) +\n \" onto an object/message field.\")\n elif isinstance(value, dict):\n # Fill message structure\n # a.b = {c: 1, d: 2}\n # a.b.c = 1\n # a.b.d = 2\n for key in value:\n nested = getattr(field_struct, key)\n r = self._map_onto(nested, value[key], self._get_options(field_struct, key))\n if r:\n self._checked_set(field_struct, key, r[0])\n elif isinstance(value, tuple):\n # Fill message structure (in order)\n # a.b = (1, 2)\n # a.b.c = 1\n # a.b.d = 2\n if not hasattr(field_struct, 'DESCRIPTOR'):\n raise FieldWrongTypeException(\"Tried to map illegal structure \" +\n str(value) +\n \" onto a list/repeated field.\")\n fields = field_struct.DESCRIPTOR.fields\n for i in range(len(value)):\n nested = getattr(field_struct, fields[i].name)\n r = self._map_onto(nested, value[i], self._get_options(field_struct, fields[i].name))\n if r:\n self._checked_set(field_struct, fields[i].name, r[0])\n else:\n return [self._process_value(value), ]", "def deep_type(obj, depth = None, max_sample = None, get_type = None):\n return _deep_type(obj, [], 0, depth, max_sample, get_type)", "def test_map_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_map()\r\n\r\n assert isinstance(nested, dict)\r\n assert nested['vertex'] == original\r\n assert nested['number'] == 5", "def test_fields(self):\n\n class Foo(Model):\n field1 = StringField()\n field2 = IntegralField()\n\n assert hasattr(Foo, \"_fields\")\n assert type(Foo._fields) is dict\n\n assert not hasattr(Foo, \"field1\")\n assert \"field1\" in Foo._fields\n assert type(Foo._fields[\"field1\"]) is StringField\n\n assert not hasattr(Foo, \"field2\")\n assert \"field2\" in Foo._fields\n assert type(Foo._fields[\"field2\"]) is IntegralField", "def test_struct_with_nested_struct(self):\n proto = struct_pb2.Struct()\n\n # pylint: disable=no-member\n subproto = proto.get_or_create_struct(\"bar\")\n subproto[\"baz\"] = 42\n deserialized = rpc.deserialize_resource_props(proto)\n self.assertDictEqual({\n \"bar\": {\n \"baz\": 42\n }\n }, deserialized)" ]
[ "0.8126932", "0.72236913", "0.7048768", "0.6998724", "0.6372483", "0.61942005", "0.61895186", "0.5984007", "0.59549826", "0.5913331", "0.5892807", "0.58536166", "0.5798939", "0.5778446", "0.57278585", "0.5703927", "0.5660505", "0.5648864", "0.5636653", "0.5601702", "0.5564571", "0.5526084", "0.5495203", "0.5464965", "0.5442828", "0.542928", "0.54244685", "0.5414835", "0.5407127", "0.5397632" ]
0.74837184
1
Tests merging schemas with EmbeddingTest
def test_merge_schemas(registry):
    test_schema = registry[TYPES][unit_test_type].schema
    test_subschema = test_schema['properties']['attachment']
    res = merge_schemas(test_subschema, registry[TYPES])
    assert res
    assert res != test_subschema
    assert res['properties']['attachment']['attachment'] is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_merge_model_relationships(bf, dataset, organization, assert_in_neo4j):\n person = dataset.create_model(\n \"Person\",\n schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)],\n )\n\n food = dataset.create_model(\n \"Food\", schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)]\n )\n\n color = dataset.create_model(\n \"Color\",\n schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)],\n )\n\n # Relationship type with no \"from\" and \"to\"\n likes = dataset.create_relationship_type(\"Likes\", \"likes\")\n\n # Relationship type with \"from\" and \"to\", but no instances\n dataset.create_relationship_type(\n \"Appreciates\", \"appreciates\", source=person.id, destination=color.id\n )\n\n alice = person.create_record({\"name\": \"Alice\"})\n bob = person.create_record({\"name\": \"Bob\"})\n charlie = person.create_record({\"name\": \"Charlie\"})\n\n ice_cream = food.create_record({\"name\": \"Ice Cream\"})\n\n alice_likes_bob = likes.relate(alice, bob)\n bob_likes_charlie = likes.relate(bob, charlie)\n alice_likes_ice_cream = likes.relate(alice, ice_cream)\n\n # At this point we have in the relation_types file\n #\n # ()-[likes]->()\n # (person)-[appreciates]->(color)\n #\n # and in the schemaRelations file\n #\n # (person)-[likes]->(person)\n # (person)-[likes]->(food)\n #\n # The /relationships endpoint on the old service *only* returns things in\n # the relation_types file.\n #\n # But the new service should merge them both together to create all\n # necessary model relationships and stubs:\n #\n # ()-[likes]->()\n # (person)-[appreciates]->(color)\n # (person)-[likes]->(person)\n # (person)-[likes]->(food)\n\n migrate_dataset(\n organization_id=organization.int_id,\n # organization_node_id=organization.id,\n dataset_ids=[dataset.int_id]\n # dataset_node_id=dataset.id,\n )\n\n assert_in_neo4j()\n\n # Drop into raw requests because of\n # https://app.clickup.com/t/426zh9\n relationships = bf._api.concepts.relationships._get(\n bf._api.concepts.relationships._uri(\n \"/{dataset_id}/relationships\", dataset_id=dataset.id\n )\n )\n\n assert sorted(\n [(r[\"from\"] or \"*\", r[\"name\"], r[\"to\"] or \"*\") for r in relationships]\n ) == sorted(\n [\n (\"*\", \"Likes\", \"*\"),\n (person.id, \"Likes\", food.id),\n (person.id, \"Likes\", person.id),\n (person.id, \"Appreciates\", color.id),\n ]\n )", "def test_(association_schemas, schemas, expected_schemas):\n returned_schemas = association._combine_defined_expected_schemas(\n association_schemas=association_schemas, schemas=schemas\n )\n\n assert list(returned_schemas) == expected_schemas", "def test_compare_schemas_happypath(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.base_schema\n )\n\n assert status == schema_utils.Update.no_update", "def test_json_merge_patch():\n schemas = {}\n\n basenames = (\n 'record-package-schema.json',\n 'release-package-schema.json',\n 'release-schema.json',\n 'versioned-release-validation-schema.json',\n )\n\n if ocds_version or not use_development_version:\n url_pattern = ocds_schema_base_url + ocds_tag + '/{}'\n else:\n url_pattern = development_base_url + '/{}'\n\n for basename in basenames:\n schemas[basename] = http_get(url_pattern.format(basename)).json()\n\n if basename == 'release-schema.json':\n path = os.path.join(extensiondir, 'extension.json')\n with open(path) as f:\n metadata = json.load(f, object_pairs_hook=rejecting_dict)\n schemas[basename] = extend_schema(basename, schemas[basename], metadata, 
codelists=external_codelists)\n\n # This loop is somewhat unnecessary, as repositories contain at most one of each schema file.\n for path, name, text, data in walk_json_data(patch):\n if is_json_merge_patch(data):\n if name in basenames:\n unpatched = deepcopy(schemas[name])\n try:\n patched = merge(unpatched, data)\n except Exception as e:\n assert False, f'Exception: {e} {path}'\n\n # All metadata should be present.\n validate_json_schema(path, name, patched, metaschemas()['metaschema'], full_schema=True)\n\n # Empty patches aren't allowed. json_merge_patch mutates `unpatched`, so `schemas[name]` is tested.\n assert patched != schemas[name]", "def merge_schema(first, second):\n if not (type(first) == type(second) == dict):\n raise ValueError(\"Argument is not a schema\")\n\n if not (first.get('type') == second.get('type') == 'object'):\n raise NotImplementedError(\"Unsupported root type\")\n\n return merge_objects(first, second)", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", "def test_json(self):\n schema1 = GraphQlSchemaFactory.create_from_modules([\n 'graphql.executor.test.star_wars',\n 'graphql.scalar_descriptors.strict'])\n schema2 = GraphQlSchema.create_from_json(schema1.to_json())\n self._assert_schemas_equal(schema1, schema2)\n schema1 = GraphQlSchemaFactory.create_from_modules([\n 'graphql.executor.test.star_wars',\n 'graphql.executor.test.star_wars_extra',\n 'graphql.scalar_descriptors.strict'])\n schema2 = GraphQlSchema.create_from_json(schema1.to_json())\n self._assert_schemas_equal(schema1, schema2)", "def test_marshmallow_base_schema_remove_missing(self, base_schema):\n # Typically, we'll use it in all our schemas, so let's define base\n # Document and EmbeddedDocument classes using this base schema class\n @self.instance.register\n class MyDocument(Document):\n MA_BASE_SCHEMA_CLS = base_schema\n\n class Meta:\n abstract = True\n\n @self.instance.register\n class MyEmbeddedDocument(EmbeddedDocument):\n MA_BASE_SCHEMA_CLS = base_schema\n\n class Meta:\n abstract = True\n\n @self.instance.register\n class Accessory(MyEmbeddedDocument):\n brief = fields.StrField()\n value = fields.IntField()\n\n @self.instance.register\n class Bag(MyDocument):\n item = fields.EmbeddedField(Accessory)\n content = fields.ListField(fields.EmbeddedField(Accessory))\n\n data = {\n 'item': {'brief': 'sportbag'},\n 'content': [\n {'brief': 'cellphone'},\n {'brief': 'lighter'}]\n }\n dump = {\n 'id': None,\n 'content': [\n {'brief': 'cellphone', 'value': None},\n {'brief': 'lighter', 'value': None}\n ],\n 'item': {'brief': 'sportbag', 'value': None}\n }\n remove_missing_dump = {\n 'item': {'brief': 'sportbag'},\n 'content': [\n {'brief': 'cellphone'},\n {'brief': 'lighter'}\n ]\n }\n expected_dump = {\n BaseMarshmallowSchema: remove_missing_dump,\n ma.Schema: dump,\n }[base_schema]\n\n bag = Bag(**data)\n ma_schema = Bag.schema.as_marshmallow_schema()\n assert ma_schema().dump(bag) == expected_dump", "def test_schema(self):\r\n db_connection = modulestore().db_connection\r\n for collection in [db_connection.course_index, db_connection.structures, db_connection.definitions]:\r\n self.assertEqual(\r\n collection.find({'schema_version': {'$exists': False}}).count(),\r\n 0,\r\n \"{0.name} has records without schema_version\".format(collection)\r\n )\r\n self.assertEqual(\r\n collection.find({'schema_version': {'$ne': 
SplitMongoModuleStore.SCHEMA_VERSION}}).count(),\r\n 0,\r\n \"{0.name} has records with wrong schema_version\".format(collection)\r\n )", "def test_custom_metadata_schema(self):\n # The use-case for this functionality is to allow using\n # Foreign Data Wrappers, each with a full set of Django\n # tables, to copy between databases using SQLAlchemy\n # and the automatically generation of aldjemy.\n metadata = MetaData(schema=\"arbitrary\")\n sa_models = construct_models(metadata)\n self.assertEqual(sa_models[Log].__table__.schema, \"arbitrary\")", "def test_merge_duplicate_linked_property_instances(\n bf, dataset, organization, assert_in_neo4j\n):\n person = dataset.create_model(\n \"Person\",\n schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)],\n )\n likes = person.add_linked_property(\"likes\", person)\n\n alice = person.create_record({\"name\": \"Alice\"})\n\n bob = person.create_record({\"name\": \"Bob\"})\n\n # Add one instance: (alice)-[likes]->(alice)\n alice.add_linked_value(alice, likes)\n\n # Add another: (alice)-[likes]->(bob)\n # The Python client deletes existing linked properties before adding new ones -\n # this manual request allows us to duplicate the linked property instance.\n # (This is the same requeust `add_linked_value` makes under the hood.)\n bf._api.concepts.instances._post(\n bf._api.concepts.instances._uri(\n \"/{dataset_id}/concepts/{concept_id}/instances/{instance_id}/linked\",\n dataset_id=dataset.id,\n concept_id=person.id,\n instance_id=alice.id,\n ),\n json={\n \"schemaLinkedPropertyId\": likes.id,\n \"to\": bob.id,\n \"name\": \"likes\",\n \"displayName\": \"likes\",\n },\n )\n\n # This should not be possible - there should only be one \"likes\" property\n assert [lp.type.name for lp in alice.get_linked_values()] == [\"likes\", \"likes\"]\n\n migrate_dataset(organization_id=organization.int_id, dataset_ids=[dataset.int_id])\n\n assert_in_neo4j()\n\n # Import should merge linked values\n linked_values = alice.get_linked_values()\n assert len(linked_values) == 1\n assert linked_values[0].type.name == \"likes\"\n assert linked_values[0].target_record_id == bob.id", "def test_dump_related_records(testdata):\n doc1 = Document.get_record_by_pid(testdata[\"documents\"][0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(testdata[\"documents\"][1][\"document_pid\"])\n doc3 = Document.get_record_by_pid(testdata[\"documents\"][2][\"document_pid\"])\n assert doc1[\"related_records\"] == []\n assert doc2[\"related_records\"] == []\n assert doc3[\"related_records\"] == []\n\n doc1.related.add_language(doc2)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(doc1[doc1.pid_field])\n doc2 = Document.get_record_by_pid(doc2[doc2.pid_field])\n assert doc1[\"related_records\"] == [\n record_to_relation_dump(doc2, RelatedRecords.language_relation())\n ]\n assert doc2[\"related_records\"] == [\n record_to_relation_dump(doc1, RelatedRecords.language_relation())\n ]\n\n doc1.related.add_language(doc3)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(doc1[doc1.pid_field])\n doc2 = Document.get_record_by_pid(doc2[doc2.pid_field])\n doc3 = Document.get_record_by_pid(doc3[doc3.pid_field])\n assert doc1[\"related_records\"] == [\n record_to_relation_dump(doc2, RelatedRecords.language_relation()),\n record_to_relation_dump(doc3, RelatedRecords.language_relation())\n ]\n assert doc2[\"related_records\"] == [\n record_to_relation_dump(doc1, RelatedRecords.language_relation()),\n record_to_relation_dump(doc3, RelatedRecords.language_relation())\n ]\n 
assert doc3[\"related_records\"] == [\n record_to_relation_dump(doc1, RelatedRecords.language_relation()),\n record_to_relation_dump(doc2, RelatedRecords.language_relation())\n ]\n\n doc1.related.remove_language(doc2)\n doc1.related.remove_language(doc3)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(doc1[doc1.pid_field])\n doc2 = Document.get_record_by_pid(doc2[doc2.pid_field])\n doc3 = Document.get_record_by_pid(doc3[doc3.pid_field])\n assert doc1[\"related_records\"] == []\n assert doc2[\"related_records\"] == []\n assert doc3[\"related_records\"] == []", "def test_compare_schemas_empty(self):\n status = schema_utils.compare_schemas(\n {},\n self.base_schema,\n )\n\n assert status == schema_utils.Update.first_run", "def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)", "def test_migrate_dataset(bf, dataset, organization, assert_in_neo4j):\n person = dataset.create_model(\n \"Person\",\n schema=[\n ModelProperty(\"name\", data_type=str, title=True, required=True),\n ModelProperty(\"age\", data_type=int),\n ModelProperty(\"dob\", data_type=datetime),\n ],\n )\n\n likes = dataset.create_relationship_type(\n \"likes\", \"\", source=person.id, destination=person.id\n )\n\n alice = person.create_record(\n {\"name\": \"Alice\", \"age\": 25, \"dob\": datetime(1994, 11, 19)}\n )\n\n bob = person.create_record(\n {\"name\": \"Bob\", \"age\": 24, \"dob\": datetime(1995, 11, 19)}\n )\n\n likes.relate(alice, bob)\n\n migrate_dataset(\n organization_id=organization.int_id,\n # organization_node_id=organization.id,\n dataset_ids=[dataset.int_id]\n # dataset_node_id=dataset.id,\n )\n\n assert_in_neo4j()\n\n assert len(dataset.models()) == 1\n person = dataset.get_model(\"Person\")\n assert person.get_all() == [alice, bob]\n\n assert len(dataset.relationships()) == 1\n assert alice.get_related(person.type) == [bob]", "def test_relations_dumper(testapp, db, example_data):\n\n class RecordWithRelations(Record):\n relations = RelationsField(\n language=PKRelation(\n key=\"language\", keys=[\"iso\", \"information.ethnicity\"], record_cls=Record\n ),\n languages=PKListRelation(\n key=\"languages\",\n keys=[\"iso\", \"information.ethnicity\"],\n record_cls=Record,\n ),\n )\n\n dumper = SearchDumper(extensions=[RelationDumperExt(\"relations\")])\n\n # Create the record\n en_language = Record.create(\n {\n \"title\": \"English\",\n \"iso\": \"en\",\n \"information\": {\"native_speakers\": \"400 million\", \"ethnicity\": \"English\"},\n }\n )\n fr_language = Record.create(\n {\n \"title\": \"French\",\n \"iso\": \"fr\",\n \"information\": {\"native_speakers\": \"76.8 million\", \"ethnicity\": \"French\"},\n }\n )\n db.session.commit()\n record = RecordWithRelations.create(\n {\n \"foo\": \"bar\",\n \"mylist\": [\"a\", \"b\"],\n }\n )\n record.relations.language = en_language\n record.relations.languages = [en_language, fr_language]\n db.session.commit()\n\n # Dump it\n dump = record.dumps()\n assert dump[\"foo\"] == \"bar\"\n assert dump[\"mylist\"] == [\"a\", \"b\"]\n assert dump[\"language\"] == {\n \"id\": str(en_language.id),\n \"iso\": \"en\",\n \"information\": {\"ethnicity\": \"English\"},\n \"@v\": str(en_language.id) + \"::\" + str(en_language.revision_id),\n }\n assert dump[\"languages\"] == [\n {\n \"id\": str(en_language.id),\n \"iso\": \"en\",\n \"information\": {\"ethnicity\": \"English\"},\n \"@v\": str(en_language.id) + \"::\" + str(en_language.revision_id),\n },\n {\n \"id\": str(fr_language.id),\n \"iso\": \"fr\",\n 
\"information\": {\"ethnicity\": \"French\"},\n \"@v\": str(fr_language.id) + \"::\" + str(fr_language.revision_id),\n },\n ]\n assert dump[\"uuid\"] == str(record.id)\n assert dump[\"version_id\"] == record.revision_id + 1\n assert dump[\"created\"][:19] == record.created.isoformat()[:19]\n assert dump[\"updated\"][:19] == record.updated.isoformat()[:19]\n\n # TODO: Implement loader\n # Load it\n # new_record = Record.loads(dump, loader=dumper)\n # assert 'count' not in new_record", "def test_add_relation_types(self):\n pass", "def test_build_schema_badschema(self):\n dummy_meta = {\n 'schema': '',\n 'version': '1.0.0',\n 'update': datetime.datetime.utcnow().isoformat(),\n }\n\n with pytest.raises(jsonschema.exceptions.ValidationError):\n metadata = schema_utils.build_metadata(\n self.dummy_schema,\n dummy_meta,\n schema_utils.Update.first_run\n )", "def test_merge_extra_metadata(cidc_api, clean_db, monkeypatch):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n with cidc_api.app_context():\n user = Users.find_by_id(user_id)\n make_cimac_biofx_user(user_id, cidc_api)\n\n with cidc_api.app_context():\n assay_upload = UploadJobs.create(\n upload_type=\"assay_with_extra_md\",\n uploader_email=user.email,\n gcs_file_map={},\n metadata={\n PROTOCOL_ID_FIELD_NAME: trial_id,\n \"whatever\": {\n \"hierarchy\": [\n {\"we just need a\": \"uuid-1\", \"to be able\": \"to merge\"},\n {\"and\": \"uuid-2\"},\n ]\n },\n },\n gcs_xlsx_uri=\"\",\n commit=False,\n )\n assay_upload.id = 137\n assay_upload.insert()\n\n custom_extra_md_parse = MagicMock()\n custom_extra_md_parse.side_effect = lambda f: {\"extra_md\": f.read().decode()}\n monkeypatch.setattr(\n \"cidc_schemas.prism.merger.EXTRA_METADATA_PARSERS\",\n {\"assay_with_extra_md\": custom_extra_md_parse},\n )\n\n form_data = {\n \"job_id\": 137,\n \"uuid-1\": (io.BytesIO(b\"fake file 1\"), \"fname1\"),\n \"uuid-2\": (io.BytesIO(b\"fake file 2\"), \"fname2\"),\n }\n\n client = cidc_api.test_client()\n res = client.post(\"/ingestion/extra-assay-metadata\", data=form_data)\n assert res.status_code == 200\n assert custom_extra_md_parse.call_count == 2\n\n fetched_jobs = UploadJobs.list()\n assert 1 == len(fetched_jobs)\n au = fetched_jobs[0]\n assert \"extra_md\" in au.metadata_patch[\"whatever\"][\"hierarchy\"][0]\n assert \"extra_md\" in au.metadata_patch[\"whatever\"][\"hierarchy\"][1]", "def test_MergeManifests_comments():\n d1 = dpack_pb2.DataPackage()\n f1 = d1.file.add()\n f1.relative_path = \"a\"\n d2 = dpack_pb2.DataPackage()\n d2.comment = \"abc\"\n f2 = d2.file.add()\n f2.comment = \"def\"\n f2.relative_path = \"a\"\n dpack.MergeManifests(d1, d2)\n assert d1.comment == d2.comment\n assert d1.file[0].comment == d2.file[0].comment", "def testEquivalenceAfterRoundTrip(self):\n correct = 0\n for example in VALID_EXAMPLES:\n original_schema = schema.Parse(example.schema_string)\n round_trip_schema = schema.Parse(str(original_schema))\n if original_schema == round_trip_schema:\n correct += 1\n debug_msg = \"%s: ROUND TRIP SUCCESS\" % example.name\n else:\n debug_msg = \"%s: ROUND TRIP FAILURE\" % example.name\n self.fail(\n \"Round trip failure: %s, %s, %s\"\n % (example.name, original_schema, str(original_schema)))\n\n fail_msg = \"Round trip success on %d out of %d schemas\" % \\\n (correct, len(VALID_EXAMPLES))\n self.assertEqual(correct, len(VALID_EXAMPLES), fail_msg)", "def _assert_schemas_equal(self, schema1, schema2):\n self.assertEqual(\n set(schema1._base_types.iterkeys()),\n set(schema2._base_types.iterkeys()))\n for name, type1 in 
schema1._base_types.iteritems():\n type2 = schema2._base_types[name]\n if isinstance(type1, GraphQlObjectType):\n self.assertIsInstance(type2, GraphQlObjectType)\n self._assert_object_types_equal(type1, type2)\n elif isinstance(type1, GraphQlInterfaceType):\n self.assertIsInstance(type2, GraphQlInterfaceType)\n self._assert_interface_types_equal(type1, type2)\n elif isinstance(type1, GraphQlUnionType):\n self.assertIsInstance(type2, GraphQlUnionType)\n self._assert_union_types_equal(type1, type2)\n elif isinstance(type1, GraphQlScalarType):\n self.assertIsInstance(type2, GraphQlScalarType)\n self._assert_scalar_types_equal(type1, type2)\n elif isinstance(type1, GraphQlInputObjectType):\n self.assertIsInstance(type2, GraphQlInputObjectType)\n self._assert_input_object_types_equal(type1, type2)\n elif isinstance(type1, GraphQlEnumType):\n self.assertIsInstance(type2, GraphQlEnumType)\n self._assert_enum_types_equal(type1, type2)\n else:\n raise RuntimeError(\n 'Unhandled GraphQlType subclass {:s}'.format(\n type1.__class__.__name__))", "def test_schema_validation():\n resolver = RefResolver.from_schema(j, store={\"definitions\": j})\n schema_definitions = j[\"definitions\"]\n validation_models = root_dir / \"validation\" / \"models.yaml\"\n validation_tests = yaml.load(open(validation_models), Loader=yaml.SafeLoader)\n for cls, tests in validation_tests.items():\n for t in tests:\n validate(instance=t[\"in\"],\n schema=schema_definitions[cls],\n resolver=resolver)", "def test_merge_org_2(self):\r\n result = merge('ot_same', \"Organization\")\r\n self.assertEqual(result,\r\n {'Related People': '<ul></ul>',\r\n 'Kind': u'testOrgType',\r\n 'Alternate Names': u'testOrganization',\r\n 'Related Crises': '<ul></ul>',\r\n 'Maps': '<ul class=\"unstyled\"><li><iframe width=\"425\" height=\"350\" frameborder=\"0\" scrolling=\"no\" marginheight=\"0\" marginwidth=\"0\" src=\"https://maps.google.com/maps?q=testCity, testState, testCountry&amp;output=embed\"></iframe></li><br /></ul>',\r\n 'Citations': '<ul></ul>',\r\n 'External Links': '<ul></ul>',\r\n 'Location': 'testCity, testState, testCountry<br />',\r\n 'Social': '<ul class=\"unstyled\"></ul>',\r\n 'Images': '<ul class=\"thumbnails\"></ul>',\r\n 'Name': u'testOrganization',\r\n 'Description': u'testOrg Description'}\r\n )", "def test_build_schema_no_update(self):\n metadata = schema_utils.build_metadata(\n self.dummy_schema,\n self.fake_metadata,\n schema_utils.Update.no_update,\n )\n assert metadata == self.fake_metadata", "def test_merge_org_1(self):\r\n result = merge('ot_id', \"Organization\")\r\n self.assertEqual(result,\r\n {'Related People': '<ul></ul>',\r\n 'Kind': u'testOrgType, another type',\r\n 'Alternate Names': u'a different Org',\r\n 'Related Crises': '<ul></ul>',\r\n 'Maps': '<ul class=\"unstyled\"><li><iframe width=\"425\" height=\"350\" frameborder=\"0\" scrolling=\"no\" marginheight=\"0\" marginwidth=\"0\" src=\"https://maps.google.com/maps?q=testCity, testState, testCountry&amp;output=embed\"></iframe></li><br /><li><iframe width=\"425\" height=\"350\" frameborder=\"0\" scrolling=\"no\" marginheight=\"0\" marginwidth=\"0\" src=\"https://maps.google.com/maps?q=Austin, TX, Uganda&amp;output=embed\"></iframe></li><br /></ul>',\r\n 'Citations': '<ul></ul>',\r\n 'External Links': '<ul></ul>',\r\n 'Location': 'testCity, testState, testCountry<br />\\nAustin, TX, Uganda<br />',\r\n 'Social': '<ul class=\"unstyled\"></ul>',\r\n 'Images': '<ul class=\"thumbnails\"></ul>',\r\n 'Name': u'testOrganization',\r\n 'Description': u'testOrg 
Description<p /><p />Add to description'}\r\n )", "def test_schema(self):\n\n # schema for what the 'some_service' configuration\n # ought to look like.\n schema = Schema({\n 'some_service': {\n 'host': basestring,\n 'port': Coerce(int),\n Required(\n 'pool_size', default=5):\n All(Coerce(int), Range(min=1, max=20)),\n 'credentials': {\n 'username': basestring,\n 'password': basestring\n }\n }\n })\n\n cd = ConfigDict()\n cd.register_trigger(\n SchemaTrigger(schema)\n )\n\n cd.merge_dict({\n 'some_service': {\n 'host': 'xyz',\n 'port': 123,\n 'credentials': {'username': 'foo', 'password': 'bar'}\n }\n })\n cd.configure()\n\n self.assertEquals(cd.some_service.host, 'xyz')\n self.assertEquals(cd.some_service.port, 123)\n self.assertEquals(cd.some_service.pool_size, 5)\n self.assertEquals(cd.some_service.credentials.username, 'foo')\n self.assertEquals(cd.some_service.credentials.password, 'bar')\n\n # integer coersion should take care of '123' instead of 123\n cd = ConfigDict()\n cd.register_trigger(\n SchemaTrigger(schema)\n )\n cd.merge_dict({\n 'some_service': {\n 'host': 'xyz',\n 'port': '123',\n 'credentials': {'username': 'foo', 'password': 'bar'}\n }\n })\n cd.configure()\n\n self.assertEquals(cd.some_service.host, 'xyz')\n self.assertEquals(cd.some_service.port, 123)\n self.assertEquals(cd.some_service.pool_size, 5)\n self.assertEquals(cd.some_service.credentials.username, 'foo')\n self.assertEquals(cd.some_service.credentials.password, 'bar')\n\n cd = ConfigDict()\n cd.register_trigger(\n SchemaTrigger(schema)\n )\n cd.merge_dict({\n 'some_service': {\n 'host': 'xyz',\n 'port': 123,\n 'pool_size': 21,\n 'credentials': {'username': 'foo', 'password': 'bar'}\n }\n })\n\n # not valid -- pool_size out of range\n with self.assertRaises(MultipleInvalid):\n cd.configure()", "def test_MergeManifests_missing_files():\n d1 = dpack_pb2.DataPackage()\n f1 = d1.file.add()\n f1.relative_path = \"a\"\n f1.comment = \"abc\"\n d2 = dpack_pb2.DataPackage()\n f2 = d2.file.add()\n f2.relative_path = \"b\"\n f2.comment = \"def\"\n dpack.MergeManifests(d1, d2)\n assert d1.file[0].comment == \"abc\"\n assert d2.file[0].comment == \"def\"", "def test_ref_merge() -> None:\n soup = generate_case(\"ref_merge\")\n\n tests.html_schema_doc_asserts.assert_descriptions(soup, [\"This is the description from the definition\"])\n tests.html_schema_doc_asserts.assert_types(soup, [\"object\", \"enum (of string)\", \"object\", \"string\", \"string\"])\n tests.html_schema_doc_asserts.assert_property_names(soup, [\"aProperty\", \"aDictPropertyARequired\", \"a\", \"b\"])\n tests.html_schema_doc_asserts.assert_default_values(soup, ['\"Default from property\"', '{\"a\": \"a\", \"b\": \"b\"}'])\n tests.html_schema_doc_asserts.assert_enum_values(soup, [['\"value1\"', '\"value2\"']])\n # a and b are required from the definition\n tests.html_schema_doc_asserts.assert_required(soup, [False, False, True, True])", "def test_bert_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": {\"embedder_type\": \"bert\"},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"bert-base-cased\",\n \"add_terminals\": True\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n 
model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n new_config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**new_config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n \"\"\" test for different pretrained transformers\"\"\"\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"distilbert-base-uncased\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"albert-base-v2\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"sentence-transformers/all-mpnet-base-v2\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"roberta-base\",\n }\n }\n with pytest.raises(NotImplementedError):\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)" ]
[ "0.677895", "0.63466245", "0.6154003", "0.6025654", "0.6007597", "0.5987249", "0.5959084", "0.58845913", "0.58188117", "0.5803733", "0.5755856", "0.575136", "0.5711838", "0.568449", "0.5677796", "0.5663447", "0.5576394", "0.5543358", "0.5517996", "0.55151016", "0.5514564", "0.5495061", "0.5478623", "0.5475533", "0.5468228", "0.5466598", "0.5459635", "0.54589623", "0.5455608", "0.5455308" ]
0.745198
0
Use EmbeddingTest schema to ensure unmappable properties under patternProperties and additionalProperties are not mapped.
def test_create_mapping_drops_unmappable_properties(registry):
    test_item_type = "embedding_test"
    expected_embeds = ["pattern_property_embed", "additional_property_embed"]
    expected_mapped_property = "should_be_mapped"
    mapping = type_mapping(registry[TYPES], test_item_type)
    mapped_properties = mapping.get("properties")
    assert mapped_properties
    for expected_embed in expected_embeds:
        mapped_embed = mapped_properties.get(expected_embed)
        assert mapped_embed
        mapped_embed_properties = mapped_embed.get("properties", {})
        assert len(mapped_embed_properties.keys()) == 1
        assert mapped_embed_properties.get(expected_mapped_property)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_schema_strict():\n path = os.path.join(extensiondir, 'release-schema.json')\n if os.path.isfile(path):\n with open(path) as f:\n data = json.load(f)\n\n original = deepcopy(data)\n add_validation_properties(data)\n\n assert data == original, f'{path} is missing validation properties, run: ocdskit schema-strict {path}'", "def test_deserialize_with_additional_properties(self):\n\n # Dog is allOf with two child schemas.\n # The OAS document for Dog does not specify the 'additionalProperties' keyword,\n # which means that by default, the Dog schema must allow undeclared properties.\n # The additionalProperties keyword is used to control the handling of extra stuff,\n # that is, properties whose names are not listed in the properties keyword.\n # By default any additional properties are allowed.\n from petstore_api.model import dog, mammal, zebra, banana_req\n data = {\n 'className': 'Dog',\n 'color': 'brown',\n 'breed': 'golden retriever',\n # Below are additional, undeclared properties.\n 'group': 'Terrier Group',\n 'size': 'medium',\n }\n response = self.__response(data)\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=dog.Dog),\n },\n )\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, dog.Dog))\n self.assertEqual(body['className'], 'Dog')\n self.assertEqual(body['color'], 'brown')\n self.assertEqual(body['breed'], 'golden retriever')\n self.assertEqual(body['group'], 'Terrier Group')\n self.assertEqual(body['size'], 'medium')\n\n # The 'zebra' schema allows additional properties by explicitly setting\n # additionalProperties: true.\n # This is equivalent to 'additionalProperties' not being present.\n data = {\n 'className': 'zebra',\n 'type': 'plains',\n # Below are additional, undeclared properties\n 'group': 'abc',\n 'size': 3,\n 'p1': True,\n 'p2': ['a', 'b', 123],\n }\n response = self.__response(data)\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=mammal.Mammal),\n },\n )\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, zebra.Zebra))\n self.assertEqual(body['className'], 'zebra')\n self.assertEqual(body['type'], 'plains')\n self.assertEqual(bool(body['p1']), True)\n\n # The 'bananaReq' schema disallows additional properties by explicitly setting\n # additionalProperties: false\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=banana_req.BananaReq),\n },\n )\n with self.assertRaisesRegex(\n petstore_api.exceptions.ApiTypeError,\n r\"BananaReq was passed 1 invalid argument: \\['unknown-group'\\]\"\n ):\n data = {\n 'lengthCm': 21.2,\n 'sweet': False,\n # Below are additional, undeclared properties. 
They are not allowed,\n # an exception must be raised.\n 'unknown-group': 'abc',\n }\n response = self.__response(data)\n _response_for_200.deserialize(response, self.configuration)", "def test_create_mapping_correctly_maps_embeds(registry, item_type):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n type_info = registry[TYPES].by_item_type[item_type]\n schema = type_info.schema\n embeds = add_default_embeds(item_type, registry[TYPES], type_info.embedded_list, schema)\n # assert that all embeds exist in mapping for the given type\n for embed in embeds:\n mapping_pointer = mapping\n split_embed = embed.split('.')\n for idx, split_ in enumerate(split_embed):\n # see if this is last level of embedding- may be a field or object\n if idx == len(split_embed) - 1:\n if 'properties' in mapping_pointer and split_ in mapping_pointer['properties']:\n final_mapping = mapping_pointer['properties']\n else:\n final_mapping = mapping_pointer\n if split_ != '*':\n assert split_ in final_mapping\n else:\n assert 'properties' in final_mapping or final_mapping.get('type') == 'object'\n else:\n assert split_ in mapping_pointer['properties']\n mapping_pointer = mapping_pointer['properties'][split_]", "def test_marshmallow_base_schema_remove_missing(self, base_schema):\n # Typically, we'll use it in all our schemas, so let's define base\n # Document and EmbeddedDocument classes using this base schema class\n @self.instance.register\n class MyDocument(Document):\n MA_BASE_SCHEMA_CLS = base_schema\n\n class Meta:\n abstract = True\n\n @self.instance.register\n class MyEmbeddedDocument(EmbeddedDocument):\n MA_BASE_SCHEMA_CLS = base_schema\n\n class Meta:\n abstract = True\n\n @self.instance.register\n class Accessory(MyEmbeddedDocument):\n brief = fields.StrField()\n value = fields.IntField()\n\n @self.instance.register\n class Bag(MyDocument):\n item = fields.EmbeddedField(Accessory)\n content = fields.ListField(fields.EmbeddedField(Accessory))\n\n data = {\n 'item': {'brief': 'sportbag'},\n 'content': [\n {'brief': 'cellphone'},\n {'brief': 'lighter'}]\n }\n dump = {\n 'id': None,\n 'content': [\n {'brief': 'cellphone', 'value': None},\n {'brief': 'lighter', 'value': None}\n ],\n 'item': {'brief': 'sportbag', 'value': None}\n }\n remove_missing_dump = {\n 'item': {'brief': 'sportbag'},\n 'content': [\n {'brief': 'cellphone'},\n {'brief': 'lighter'}\n ]\n }\n expected_dump = {\n BaseMarshmallowSchema: remove_missing_dump,\n ma.Schema: dump,\n }[base_schema]\n\n bag = Bag(**data)\n ma_schema = Bag.schema.as_marshmallow_schema()\n assert ma_schema().dump(bag) == expected_dump", "def test_base_schema_ignores_unknown_fields():\n assert BaseSchema().load({\"unknown\": \"field\"}) == {}", "def test_test_inline_additional_properties(self):\n pass", "def test_not_keyword_extra_fields_handling():\n\n class RequiredItem(ce.ExtendedMappingSchema):\n item = ce.ExtendedSchemaNode(colander.String())\n\n class MappingWithType(ce.ExtendedMappingSchema):\n type = ce.ExtendedSchemaNode(colander.String())\n\n class MappingWithoutType(ce.NotKeywordSchema, RequiredItem):\n _not = [MappingWithType()]\n\n class MappingOnlyNotType(ce.NotKeywordSchema):\n _not = [MappingWithType()]\n\n value = {\"type\": \"invalid\", \"item\": \"valid\"}\n node_name = ce._get_node_name(MappingWithoutType)\n try:\n result = MappingWithoutType().deserialize(value)\n except colander.Invalid:\n pass\n except Exception:\n raise AssertionError(\"Incorrect exception raised from deserialize \"\n f\"of '{node_name!s}' with {value!s}\")\n 
else:\n raise AssertionError(\"Should have raised invalid schema from deserialize \"\n f\"of '{node_name!s}' with {value!s}, but got {result!s}\")\n\n test_cases = [\n (MappingWithoutType, {\"item\": \"valid\", \"value\": \"ignore\"}, {\"item\": \"valid\"}),\n (MappingOnlyNotType, {\"item\": \"valid\", \"value\": \"ignore\"}, {})\n ]\n evaluate_test_cases(test_cases)", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", "def find_additional_properties(instance, schema):\r\n\r\n properties = schema.get(\"properties\", {})\r\n patterns = \"|\".join(schema.get(\"patternProperties\", {}))\r\n for property in instance:\r\n if property not in properties:\r\n if patterns and re.search(patterns, property):\r\n continue\r\n yield property", "def test_merge_schemas(registry):\n test_schema = registry[TYPES][unit_test_type].schema\n test_subschema = test_schema['properties']['attachment']\n res = merge_schemas(test_subschema, registry[TYPES])\n assert res\n assert res != test_subschema\n assert res['properties']['attachment']['attachment'] is True", "def test_custom_metadata_schema(self):\n # The use-case for this functionality is to allow using\n # Foreign Data Wrappers, each with a full set of Django\n # tables, to copy between databases using SQLAlchemy\n # and the automatically generation of aldjemy.\n metadata = MetaData(schema=\"arbitrary\")\n sa_models = construct_models(metadata)\n self.assertEqual(sa_models[Log].__table__.schema, \"arbitrary\")", "def setUp(self):\n self.definition = {\n 'required': ['data'],\n 'type': 'object',\n 'properties': {\n 'notifications': {\n 'items': {'type': 'string'},\n 'type': 'array'\n },\n 'data': {\n 'type': 'array',\n 'items': {\n 'required': ['email', 'display_name'],\n 'type': 'object',\n 'properties': {\n 'phone_number': {'type': 'string'},\n 'display_name': {'type': 'string'},\n 'email': {'type': 'string'},\n 'first_name': {'type': 'string'},\n 'last_name': {'type': 'string'},\n 'admin': {'type': 'string'},\n 'vehicles': {\n 'type': 'array',\n 'items': {\n 'required': ['vehicle_info_id', 'is_default'],\n 'type': 'object',\n 'example': {\n 'is_default': True,\n 'make_pretty': 'BMW',\n 'vehicle_info_id': 1234,\n 'year': 2011,\n 'make': 'bmw',\n 'model': '1-series-m',\n 'model_pretty': '1 Series M'\n },\n 'properties': {\n 'is_default': {'type': 'boolean'},\n 'make_pretty': {'type': 'string'},\n 'vehicle_info_id': {'type': 'integer'},\n 'year': {'type': 'string'},\n 'make': {'type': 'string'},\n 'model': {'type': 'string'},\n 'model_pretty': {'type': 'string'}\n }\n }\n }\n }\n }\n }\n }\n }\n\n self.actual = {\n u'notifications': [],\n u'data':\n [\n {\n u'phone_number': None,\n u'first_name': u'Lawrence',\n u'last_name': u'Kiss',\n u'display_name': u'Lawrence Kiss',\n u'vehicles': [\n {\n u'is_default': True,\n u'make_pretty': u'BMW',\n u'vehicle_info_id': 1234,\n u'year': u'2016',\n u'make': u'bmw',\n u'model': u'1-series-m',\n u'model-pretty': u'1 Series M'\n }\n ],\n u'email': u'[email protected]',\n },\n ]\n }", "def test_build_schema_badschema(self):\n dummy_meta = {\n 'schema': '',\n 'version': '1.0.0',\n 'update': datetime.datetime.utcnow().isoformat(),\n }\n\n with pytest.raises(jsonschema.exceptions.ValidationError):\n metadata = schema_utils.build_metadata(\n self.dummy_schema,\n dummy_meta,\n schema_utils.Update.first_run\n )", "def test_meta_data_is_not_inherited(self):", 
"def test_attribute_missing_validation():\n\n @attr.s\n class Foo(object):\n something = attr.ib()\n\n with pytest.raises(UnextractableSchema):\n extract_jsonschema(Foo)", "def test_propconflict(self):\n mapper(Address, addresses)\n mapper(User, users,\n properties={\n 'addresses':relation(Address, backref='email_address')\n })\n self.assertRaises(exceptions.ArgumentError, compile_mappers)", "def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )", "def test_non_attrs_object():\n class Foo(object):\n def __init__(self, x):\n self.x = x\n\n with pytest.raises(UnextractableSchema):\n extract_jsonschema(Foo)", "def test_001_validate_with_bad_properties(self):\n m = schematics_flexible.BaseFlexible(\n {'code': '06',\n 'properties': {\"a\": \"this is test\"}},\n store_handler=get_mock())\n try:\n m.validate()\n except schematicsValidationError:\n pass\n else:\n self.assertTrue(False,\n 'Model must raise exception when validate raise')", "def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n # missing language in the model\n _ = ProductModelFactory(model_definition)", "def test_schema_default_missing_validator_openapi():\n converter = ce.ObjectTypeConverter(ce.OAS3TypeConversionDispatcher())\n test_schemas = [\n Mapping,\n Missing,\n Default,\n Validator,\n DefaultMissing,\n DefaultValidator,\n MissingValidator,\n DefaultMissingValidator,\n DefaultDropValidator,\n DefaultDropRequired,\n ]\n for schema in test_schemas:\n converted = converter.convert_type(schema())\n assert converted == schema.schema_expected, f\"Schema for [{schema.__name__}] not as expected\"", "def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False", "def test_model(base, fake_session):\n\n # Make a dummy model\n\n # these fields should be ignored and should not appear in the model\n ignored = (\"field1\", \"field2\", \"field3\")\n\n # these fields are in the model, but should not get dumped to json\n loadonly = (\"field6\", \"field7\")\n\n @add_schema\n class MyModel(base):\n fields = dict(ignore=ignored, load_only=loadonly)\n\n # load the model from dummy data\n values = range(10)\n keys = [\"field{}\".format(x) for x in values]\n data = dict(zip(keys, values))\n m = MyModel.load_from(data, fake_session)\n\n return m, ignored, loadonly, data, MyModel", "def test_schema_validation():\n resolver = RefResolver.from_schema(j, store={\"definitions\": j})\n schema_definitions = j[\"definitions\"]\n validation_models = root_dir / \"validation\" / \"models.yaml\"\n validation_tests = yaml.load(open(validation_models), Loader=yaml.SafeLoader)\n for cls, tests in validation_tests.items():\n for t in tests:\n validate(instance=t[\"in\"],\n schema=schema_definitions[cls],\n resolver=resolver)", "def test_unknown_fields_are_not_allowed() -> None:\n with pytest.raises(pydantic.ValidationError):\n r4.Meta(unknown_field=True)", "def SchemaRefInProperties(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n activity_schema = api._schemas['Activity']\n for prop in activity_schema.values['properties']:\n if prop.values['wireName'] == 'object':\n self.assertTrue(prop.object_type)\n self.assertEquals('ActivityObject',\n prop.object_type.values['className'])", "def 
test_extra_with_required():\n schema = Schema({Required('toaster'): str, Extra: object})\n r = schema({'toaster': 'blue', 'another_valid_key': 'another_valid_value'})\n assert_equal(\n r, {'toaster': 'blue', 'another_valid_key': 'another_valid_value'})", "def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'ActivityObject']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))", "def test_oneof_dropable():\n\n class AnyMap(ce.PermissiveMappingSchema):\n pass # any field is ok\n\n class OneOfStrMap(ce.OneOfKeywordSchema):\n _one_of = [\n ce.ExtendedSchemaNode(colander.String()), # note: 'allow_empty=False' by default\n AnyMap()\n ]\n\n schema = OneOfStrMap(missing=colander.drop)\n evaluate_test_cases([\n (schema, [], colander.drop), # not a string nor mapping, but don't raise since drop allowed\n (schema, \"ok\", \"ok\"),\n (schema, {}, {}), # since mapping is permissive, empty is valid\n (schema, {\"any\": 123}, {\"any\": 123}), # unknown field is also valid\n # since OneOf[str,map], it is not possible to combine them\n ])\n\n class Map1(ce.ExtendedMappingSchema):\n field1 = ce.ExtendedSchemaNode(colander.String())\n\n class Map2(ce.ExtendedMappingSchema):\n field2 = ce.ExtendedSchemaNode(colander.String())\n\n class OneOfTwoMap(ce.OneOfKeywordSchema):\n _one_of = [\n Map1(),\n Map2()\n ]\n\n schema = OneOfTwoMap(missing=colander.drop)\n evaluate_test_cases([\n (schema, [], colander.drop), # not mapping, but don't raise since drop allowed\n (schema, \"\", colander.drop), # not mapping, but don't raise since drop allowed\n (schema, {}, colander.drop), # mapping, but not respecting sub-fields, don't raise since drop allowed\n (schema, {\"field1\": 1}, colander.drop), # mapping with good field name, but wrong type, drop since allowed\n (schema, {\"field1\": \"1\", \"field2\": \"2\"}, colander.drop), # cannot have both, don't raise since drop allowed\n (schema, {\"field1\": \"1\"}, {\"field1\": \"1\"}),\n (schema, {\"field2\": \"2\"}, {\"field2\": \"2\"}),\n ])\n\n # validate that the same definition above behaves normally (raise Invalid) when not dropable\n schema = OneOfTwoMap()\n evaluate_test_cases([\n (schema, [], colander.Invalid), # not mapping\n (schema, \"\", colander.Invalid), # not mapping\n (schema, {}, colander.Invalid), # mapping, but not respecting sub-fields\n (schema, {\"field1\": 1}, colander.Invalid), # mapping with good field name, but wrong type\n (schema, {\"field1\": \"1\", \"field2\": \"2\"}, colander.Invalid), # cannot have both mappings at the same time\n (schema, {\"field1\": \"1\"}, {\"field1\": \"1\"}),\n (schema, {\"field2\": \"2\"}, {\"field2\": \"2\"}),\n ])", "def test_json(self):\n schema1 = GraphQlSchemaFactory.create_from_modules([\n 'graphql.executor.test.star_wars',\n 'graphql.scalar_descriptors.strict'])\n schema2 = GraphQlSchema.create_from_json(schema1.to_json())\n self._assert_schemas_equal(schema1, schema2)\n schema1 = GraphQlSchemaFactory.create_from_modules([\n 'graphql.executor.test.star_wars',\n 'graphql.executor.test.star_wars_extra',\n 'graphql.scalar_descriptors.strict'])\n schema2 = GraphQlSchema.create_from_json(schema1.to_json())\n self._assert_schemas_equal(schema1, schema2)" ]
[ "0.61446816", "0.61118954", "0.6097759", "0.60460526", "0.59127545", "0.5869367", "0.58246493", "0.5662752", "0.56013864", "0.5595379", "0.5581337", "0.55700654", "0.5553623", "0.5466026", "0.54529816", "0.54280853", "0.53772664", "0.53680813", "0.5362037", "0.53439116", "0.53331137", "0.5332325", "0.53230625", "0.53085667", "0.5290815", "0.5275622", "0.5274247", "0.5262119", "0.52500755", "0.5244798" ]
0.7337448
0
Test Elasticsearch items requiring an upgrade are identified for indexing.
def test_get_items_to_upgrade(mock_check_index, testapp, biosample):
    ignored(mock_check_index)  # mocked for side effect
    app = testapp.app
    es = None  # ES component mocked
    item_type = "testing_biosample_sno"
    biosample_uuid = biosample["uuid"]
    with mock.patch("snovault.elasticsearch.create_mapping.scan", new=mock_scan()):
        # Mocked scan found no uuids to upgrade
        to_upgrade = get_items_to_upgrade(app, es, item_type)
        assert not to_upgrade
    with mock.patch(
        "snovault.elasticsearch.create_mapping.scan", new=mock_scan(uuids=[biosample_uuid])
    ):
        # Mocked scan found the biosample uuid to upgrade
        to_upgrade = get_items_to_upgrade(app, es, item_type)
        assert to_upgrade == {biosample_uuid}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def test_index_availability(client):\n response = client.get('/')\n assert response.status_code == 200", "def test_recreate_index_that_exists(self):\n indices = self.elasticsearch_cls().indices\n indices.exists.return_value = True\n\n index_name = 'abcd'\n self.client._recreate_index(index_name)\n indices.delete.assert_called_once_with(index_name)\n indices.create.assert_called_once_with(index_name)", "def test_update_inventory(self):\n pass", "def test_indexable(self):\n # verify ----------------------\n try:\n self.collection[0]\n except TypeError:\n msg = \"'Collection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "def test_update_checklists_index_out_of_range(self):\r\n update_url = self.get_url(100)\r\n\r\n response = self.client.post(update_url)\r\n self.assertContains(response, 'Could not save checklist', status_code=400)", "def test_creating_index_type(self):", "def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_queryset_update(self):\n car = Car.objects.all()\n car.index_to_appsearch(update_only=True)\n # Note that the app search chunk size is set to 5 in `tests.settings`\n # Therefore you should see 5 calls to cover 22 documents\n self.assertEqual(self.client_update.call_count, 5)", "def test_index(self):", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def test_model_object_update(self):\n car = Car.objects.first()\n car.index_to_appsearch(update_only=True)\n self.assertEqual(self.client_update.call_count, 1)", "def test_version_upgrade_persistent_add_index(self):\n db_file = self.mktemp()\n db = Database.TestDB(db_file, persistent=True)\n yield db.open()\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()\n db = None\n\n db = Database.TestDBCreateIndexOnUpgrade(db_file, persistent=True)\n yield db.open()\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()", "def setUp(self):\n body = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n self.assertTrue(self.es.create_index('contacts_esclient_test', body))\n self.assertFalse(self.es.create_index('contacts_esclient_test', body))\n\n self.assertTrue(self.es.create_index('contacts_esclient_test2', body))\n self.assertFalse(self.es.create_index('contacts_esclient_test2', body))\n\n\n \"\"\" Index some test data \"\"\"\n data = {\"name\": \"Joe Tester\",\"age\": 21, \"sex\": \"male\"}\n self.assertTrue(self.es.index(\"contacts_esclient_test\", \"person\", body=data,\n docid=1))\n data = {\"name\": \"Joe Schmoe\",\"age\": 17, \"sex\": \"male\"}\n self.assertTrue(self.es.index(\"contacts_esclient_test\", \"person\", body=data,\n docid=2))\n\n self.assertTrue(self.es.refresh('contacts_esclient_test'))", "def 
update_index(self, ref_gen):\n testing = True\n logging.warning('Updating index')\n es_insert.index(es, ref_gen, self.index_name, testing, action=\"update\")\n logging.warning('Finished updating')", "def test_legacy_items_at_day_1(manager):\n manager.update()\n compare_results_attrs(manager.items, fixtures.FIXTURES[1])", "def test_index(self):\n es = elasticsearch.ElasticSearch(server='8.8.8.8',\n user='alice',\n password='iLoveDogs',\n doc_type='someLogCategory')\n\n index = es.index\n expected = time.strftime('logs-%Y.%m.%d')\n\n self.assertEqual(index, expected)", "def test_export_index(self):", "def test_index_exists(mock_es_client, expected):\n index_name = 'test'\n\n connection = mock_es_client.return_value\n connection.indices.exists.return_value = expected\n\n assert elasticsearch.index_exists(index_name) == expected\n connection.indices.exists.assert_called_with(index_name)", "def test_update_checklists_index_ignored_on_get(self):\r\n update_url = self.get_url(1)\r\n\r\n returned_checklists = json.loads(self.client.get(update_url).content)\r\n for pay, resp in zip(self.get_persisted_checklists(), returned_checklists):\r\n self.compare_checklists(pay, resp)", "def test_works_index_name(self):\n assert \"test_index-v4\" == self.search.works_index_name(self._db)", "def __checkFeatureIndex__(self, index, indexes):\n if index is not False:\n indexes.append(index)", "def monkeypatch_es():\n if _monkeypatched_es:\n return\n\n if VERSION == (0, 4, 5):\n def normalize_bulk_return(fun):\n \"\"\"Set's \"ok\" based on \"status\" if \"status\" exists\"\"\"\n @wraps(fun)\n def _fixed_bulk(self, *args, **kwargs):\n def fix_item(item):\n if 'status' in item['index']:\n item['index']['ok'] = (\n 200 <= item['index']['status'] < 300)\n return item\n\n ret = fun(self, *args, **kwargs)\n if 'items' in ret:\n ret['items'] = [fix_item(item) for item in ret['items']]\n return ret\n return _fixed_bulk\n\n Elasticsearch.bulk = normalize_bulk_return(Elasticsearch.bulk)", "def _ensure_es_index(self, index):\n if not self.elasticsearch.indices.exists(index):\n try:\n self.elasticsearch.indices.create(index=index)\n except TransportError as error_msg:\n self.logger.error(str(error_msg.error))\n return False\n self.logger.info('Created Index: %s', index)\n\n return True", "def test_upgrade_to_same_version(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. 
Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\"upgrade\", *self.LOCAL, self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID)],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def supports_index_feature(attr_name):\n return supports_indexes and hasattr(_test_index, attr_name)", "def test_index(populate_malware, authenticated_client):\n names = [malware.name for malware in populate_malware]\n for name in names:\n query_json = {'name': name}\n rv = authenticated_client.post('/api/entities/filter/',\n data=json.dumps(query_json),\n content_type='application/json')\n response = json.loads(rv.data)\n for item in response:\n assert item['id'].startswith('malware--')\n assert len(item['labels']) >= 1", "def test_index_instructions(self):\n\n response = self.get_response('/')\n self.assertEqual(200, response.getcode())\n\n # We're just testing if the word \"add\" is present in the index\n self.assertIn(\"add\".encode(), response.read())", "def test_index_stats(self):\n #Create Index\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False)\n #Check Index Stats\n self.sleep(30)\n index_map = self.get_index_stats()\n self.log.info(index_map)\n for query_definition in self.query_definitions:\n index_name = query_definition.index_name\n for bucket in self.buckets:\n bucket_name = bucket.name\n check_keys = ['items_count', 'total_scan_duration', 'num_docs_queued',\n 'num_requests', 'num_rows_returned', 'num_docs_queued',\n 'num_docs_pending','delete_bytes' ]\n map = self._create_stats_map(items_count=2016)\n self._verify_index_stats(index_map, index_name, bucket_name, map, check_keys)", "def _assert_indices_exist(self, catalog: CatalogName):\n es_client = ESClientFactory.get()\n service = IndexService()\n for index_name in service.index_names(catalog):\n self.assertTrue(es_client.indices.exists(index_name))" ]
[ "0.6379135", "0.6328735", "0.61283654", "0.6004192", "0.600253", "0.5985512", "0.59654146", "0.5963622", "0.5941494", "0.59300137", "0.59283274", "0.5898374", "0.5886221", "0.58828264", "0.58389443", "0.5829789", "0.5815221", "0.5757381", "0.57297903", "0.5676319", "0.56696576", "0.5639802", "0.5630957", "0.56175464", "0.56127065", "0.5596958", "0.5594599", "0.5591307", "0.5575109", "0.55622506" ]
0.6900662
0
shape_dim=3, pad=list[1, 2], mode='constant', len(pad)=2, data_format=NCL
def test_Pad3D1():
    input_shape = (1, 2, 3)
    pad = [1, 2]
    mode = "constant"
    res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]
    data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1
    obj.base(res=res, padding=pad, mode=mode, data_format="NCL", data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D21():\n input_shape = (1, 2, 3)\n # pad = np.array([2, 1]).astype('int32')\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = 
np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D19():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value = 2.0\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.88523461, 1.99072967, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 4.45995261, 9.40579439, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 6.43138915, 0.55102135, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -3.37046541, -2.92035609, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.41939397, 1.11828761, 2.0],\n [2.0, 2.0, 2.0, 
2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -6.68411074, -4.09524338, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 
16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)" ]
[ "0.7574175", "0.7328738", "0.7217285", "0.7185073", "0.7179193", "0.71389383", "0.7063445", "0.6920521", "0.6832594", "0.6778972", "0.6775516", "0.6772584", "0.6717206", "0.67117643", "0.6686753", "0.66300046", "0.660099", "0.6598098", "0.6565018", "0.6488363", "0.64845353", "0.6481821", "0.64799106", "0.64464635", "0.6444687", "0.64399886", "0.633987", "0.6325582", "0.63200593", "0.624173" ]
0.74304086
1
shape_dim=3, pad=tensor[1, 2], mode='constant', len(pad)=2, data_format=NCL
def test_Pad3D4():
    input_shape = (1, 2, 3)
    # pad = np.array([1, 2]).astype('int32')
    pad = [1, 2]
    mode = "constant"
    res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]
    data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1
    obj.base(res=res, padding=pad, mode=mode, data_format="NCL", data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D1():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D21():\n input_shape = (1, 2, 3)\n # pad = np.array([2, 1]).astype('int32')\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n 
res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D19():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if kwargs['data_format'] == 'NCHW':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end],\n [pad_beg, pad_end]],\n mode=mode)\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs", "def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def 
test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(tensor=inputs,\n paddings=[[0, 0], [0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end]])\n else:\n padded_inputs = 
tf.pad(tensor=inputs,\n paddings=[[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def fixed_padding(inputs, kernel_size, data_format='channels_first'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")" ]
[ "0.7340136", "0.7218572", "0.7137639", "0.7123048", "0.70074975", "0.6894596", "0.6894202", "0.68156576", "0.67800885", "0.6777805", "0.67192423", "0.6674043", "0.6633804", "0.6597761", "0.6552479", "0.6533389", "0.65330267", "0.64464813", "0.64454484", "0.64092356", "0.6408948", "0.6408177", "0.6396099", "0.636714", "0.6311219", "0.63078326", "0.6276361", "0.6248646", "0.6246608", "0.62453973" ]
0.7512845
0
shape_dim=3, pad=tensor[1, 0, 1, 2], mode='constant', len(pad)=4, data_format=NCHW
def test_Pad3D5():
    input_shape = (1, 1, 2, 3)
    # pad = np.array([1, 0, 1, 2]).astype('int32')
    pad = [1, 0, 1, 2]
    mode = "constant"
    res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]
    data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1
    obj.run(res=res, padding=pad, mode=mode, data_format="NCHW", data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D1():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D21():\n input_shape = (1, 2, 3)\n # pad = np.array([2, 1]).astype('int32')\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 
9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if kwargs['data_format'] == 'NCHW':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end],\n [pad_beg, pad_end]],\n mode=mode)\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_pad_8():\n paddle.disable_static()\n x = np.array([[[[1.0, 3.0], [-3.0, 1.0]]]])\n pad = [1, 1, 1, 2]\n mode = \"constant\"\n value = np.array(2.0)\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 1.0, 3.0, 2.0],\n [2.0, -3.0, 1.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ]\n ]\n ]\n )\n exp = paddle.nn.functional.pad(\n x=paddle.to_tensor(x), pad=pad, mode=mode, value=paddle.to_tensor(value), data_format=data_format\n )\n assert np.allclose(exp.numpy(), res)", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = 
kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(tensor=inputs,\n paddings=[[0, 0], [0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(tensor=inputs,\n paddings=[[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def test_Pad3D19():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def fixed_padding(inputs, kernel_size, data_format='channels_first'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if 
data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value = 2.0\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.88523461, 1.99072967, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 4.45995261, 9.40579439, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 6.43138915, 0.55102135, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -3.37046541, -2.92035609, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.41939397, 1.11828761, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -6.68411074, -4.09524338, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)" ]
[ "0.7555017", "0.7396482", "0.7254787", "0.721744", "0.71152276", "0.7049675", "0.7046378", "0.70024747", "0.700044", "0.6937854", "0.6908162", "0.68594635", "0.6833953", "0.6811814", "0.67920035", "0.6791336", "0.67853403", "0.6761495", "0.6758956", "0.6755592", "0.6754462", "0.6726379", "0.6721559", "0.6720484", "0.67171174", "0.67090034", "0.67088914", "0.6696633", "0.66935444", "0.6686173" ]
0.7529686
1
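For reference, the expected res in the test_Pad3D5 record above can be reproduced directly with paddle.nn.functional.pad, the same call pattern that appears in the negative examples (for instance test_pad_8). This is a minimal sketch only; the float32 cast and the direct assert are assumptions, since the obj.run harness itself is not part of this dump.

import numpy as np
import paddle

# Reproduce test_Pad3D5: constant padding, NCHW, pad = [left, right, top, bottom]
data = np.arange(np.prod((1, 1, 2, 3))).reshape((1, 1, 2, 3)).astype("float32") + 1
out = paddle.nn.functional.pad(
    paddle.to_tensor(data),
    pad=[1, 0, 1, 2],      # W: left=1, right=0; H: top=1, bottom=2
    mode="constant",
    value=0.0,
    data_format="NCHW",
)
expected = np.array([[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]])
assert np.allclose(out.numpy(), expected)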
shape_dim=3, pad=list[1, 2], mode='reflect', len(pad)=2, data_format=NCL must set left and right value < W, top and bottom < H.
def test_Pad3D7():
    input_shape = (1, 2, 3)
    pad = [1, 2]
    mode = "reflect"
    res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]
    data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1
    obj.base(res=res, padding=pad, mode=mode, data_format="NCL", data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D1():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D15():\n input_shape = (1, 2, 2, 2, 2)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n [[11, 12], [9, 10], [11, 12], [9, 10]],\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n ],\n [[[7, 8], [5, 6], [7, 8], [5, 6]], [[3, 4], [1, 2], [3, 4], [1, 2]], [[7, 8], [5, 6], [7, 8], [5, 
6]]],\n [\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n [[11, 12], [9, 10], [11, 12], [9, 10]],\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D21():\n input_shape = (1, 2, 3)\n # pad = np.array([2, 1]).astype('int32')\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_pad7():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 2, 1, 1, 0, 0)\n mode = \"reflect\"\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n ]\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], 
[0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_pad4():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"circular\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[4.0, 5.0, 6.0, 4.0], [1.0, 2.0, 3.0, 1.0], [4.0, 5.0, 6.0, 4.0], [1.0, 2.0, 3.0, 1.0]],\n [[4.0, 5.0, 6.0, 4.0], [1.0, 2.0, 3.0, 1.0], [4.0, 5.0, 6.0, 4.0], [1.0, 2.0, 3.0, 1.0]],\n [[4.0, 5.0, 6.0, 4.0], [1.0, 2.0, 3.0, 1.0], [4.0, 5.0, 6.0, 4.0], [1.0, 2.0, 3.0, 1.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = 
np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value = 2.0\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.88523461, 1.99072967, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 4.45995261, 9.40579439, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 6.43138915, 0.55102135, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -3.37046541, -2.92035609, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.41939397, 1.11828761, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -6.68411074, -4.09524338, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D19():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)" ]
[ "0.7113082", "0.67949843", "0.67728066", "0.67715853", "0.6747454", "0.6736785", "0.6714607", "0.6617691", "0.6598806", "0.6399041", "0.6382364", "0.6346402", "0.6342303", "0.6328459", "0.6254949", "0.6217158", "0.6181735", "0.6166698", "0.61532205", "0.6087225", "0.60173815", "0.6006889", "0.59726155", "0.5907919", "0.5903609", "0.5892222", "0.5888459", "0.5887197", "0.58697116", "0.5851049" ]
0.6933413
1
shape_dim=3, pad=list[1, 2], mode='reflect', len(pad)=2, data_format=NCL
def test_Pad3D12():
    input_shape = (1, 2, 3)
    # pad = np.array([1, 2]).astype('int32')
    pad = [1, 2]
    mode = "reflect"
    res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]
    data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1
    obj.run(res=res, padding=pad, mode=mode, data=data, data_format="NCL")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D1():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D21():\n input_shape = (1, 2, 3)\n # pad = np.array([2, 1]).astype('int32')\n pad = [2, 1]\n mode = 
\"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_pad7():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 2, 1, 1, 0, 0)\n mode = \"reflect\"\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n ]\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def test_Pad3D15():\n input_shape = (1, 2, 2, 2, 2)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n [[11, 12], [9, 10], [11, 12], [9, 10]],\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n ],\n [[[7, 8], [5, 6], [7, 8], [5, 6]], [[3, 4], [1, 2], [3, 4], [1, 2]], [[7, 8], [5, 6], [7, 8], [5, 6]]],\n [\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n [[11, 12], [9, 10], [11, 12], [9, 10]],\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D19():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], 
[0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 
4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)" ]
[ "0.74735045", "0.732375", "0.72852135", "0.71872795", "0.71636003", "0.7149898", "0.71228147", "0.7097487", "0.70847744", "0.69549984", "0.6883246", "0.68575925", "0.6828769", "0.6778449", "0.6663831", "0.6538713", "0.6519225", "0.6482645", "0.63899547", "0.63770014", "0.6361665", "0.6321006", "0.6307108", "0.626855", "0.623173", "0.62260324", "0.6208816", "0.6159604", "0.6155285", "0.611887" ]
0.7672928
0
shape_dim=3, pad=tensor[1, 1, 1, 0], mode='reflect', len(pad)=2, data_format='NHWC'
def test_Pad3D13():
    input_shape = (1, 2, 3, 1)
    pad = [1, 1, 1, 0]
    mode = "reflect"
    res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]
    data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1
    obj.run(res=res, padding=pad, mode=mode, data_format="NHWC", data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D15():\n input_shape = (1, 2, 2, 2, 2)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n [[11, 12], [9, 10], [11, 12], [9, 10]],\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n ],\n [[[7, 8], [5, 6], [7, 8], [5, 6]], [[3, 4], [1, 2], [3, 4], [1, 2]], [[7, 8], [5, 6], [7, 8], [5, 6]]],\n [\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n [[11, 12], [9, 10], [11, 12], [9, 10]],\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_Pad3D1():\n input_shape 
= (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D21():\n input_shape = (1, 2, 3)\n # pad = np.array([2, 1]).astype('int32')\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_pad7():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 2, 1, 1, 0, 0)\n mode = \"reflect\"\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n ]\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def reflection_pad(images, filter_size):\n num = filter_size // 2\n return tf.pad(images, [[0, 0], [num, num], [num, num], [0, 0]], mode='REFLECT')", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 
6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = 
np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D19():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")" ]
[ "0.743592", "0.7412066", "0.7233009", "0.72233677", "0.71813476", "0.71769875", "0.71484405", "0.70754975", "0.70616287", "0.69861776", "0.69375956", "0.68913984", "0.68714756", "0.68257403", "0.67965996", "0.6769647", "0.67384905", "0.66786844", "0.66665286", "0.66623294", "0.66504633", "0.664237", "0.65933347", "0.65773684", "0.65725315", "0.65676314", "0.6450052", "0.6431921", "0.6405839", "0.64009756" ]
0.75172204
0
shape_dim=3, pad=list[2, 1], mode='replicate', len(pad)=2, data_format=NCL
def test_Pad3D17(): input_shape = (1, 2, 3) pad = [2, 1] mode = "replicate" res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]] data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1 obj.run(res=res, padding=pad, mode=mode, data=data, data_format="NCL")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D21():\n input_shape = (1, 2, 3)\n # pad = np.array([2, 1]).astype('int32')\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D19():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, 
data_format=\"NCHW\")", "def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D1():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_pad6():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 1, 3, 0, 2, 0)\n mode = \"replicate\"\n data_format = \"NDHWC\"\n res = np.array(\n [\n [\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], 
[4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = 
np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)" ]
[ "0.80337155", "0.79982525", "0.79480815", "0.7743944", "0.7216146", "0.71544755", "0.71229935", "0.7099466", "0.7025264", "0.70234096", "0.7021302", "0.6972085", "0.69312805", "0.68579054", "0.6719512", "0.66343516", "0.65743333", "0.65555793", "0.651141", "0.6510628", "0.65037286", "0.6460431", "0.63991666", "0.63783205", "0.63415766", "0.63142884", "0.6313707", "0.6254311", "0.6096983", "0.5981614" ]
0.80598235
0
shape_dim=2, pad=list[1, 2], mode='replicate', len(pad)=2, data_format=NLC
def test_Pad3D19(): input_shape = (1, 2, 3) pad = [1, 2] mode = "replicate" res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]] data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1 obj.run(res=res, padding=pad, mode=mode, data_format="NLC", data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D21():\n input_shape = (1, 2, 3)\n # pad = np.array([2, 1]).astype('int32')\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 
16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_pad6():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 1, 3, 0, 2, 0)\n mode = \"replicate\"\n data_format = \"NDHWC\"\n res = np.array(\n [\n [\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 
12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D1():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n 
]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value = 2.0\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.88523461, 1.99072967, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 4.45995261, 9.40579439, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 6.43138915, 0.55102135, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -3.37046541, -2.92035609, 
2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.41939397, 1.11828761, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -6.68411074, -4.09524338, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)" ]
[ "0.77679384", "0.7764022", "0.77187085", "0.77154005", "0.70600796", "0.70272607", "0.70131814", "0.7012119", "0.69885784", "0.6947094", "0.69383657", "0.68388224", "0.66823745", "0.66219604", "0.64789426", "0.641125", "0.63886523", "0.63435936", "0.6326833", "0.62947047", "0.624552", "0.6233194", "0.6183375", "0.61764187", "0.61716974", "0.61510295", "0.61204034", "0.6115086", "0.6089734", "0.60827035" ]
0.78964156
0
shape_dim=3, pad=tensor[2, 1], mode='replicate', len(pad)=2, data_format=NCL
def test_Pad3D21(): input_shape = (1, 2, 3) # pad = np.array([2, 1]).astype('int32') pad = [2, 1] mode = "replicate" res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]] data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1 obj.run(res=res, padding=pad, mode=mode, data=data, data_format="NCL")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D19():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)", "def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")", "def test_Pad3D1():\n 
input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = 
[1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)", "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_pad6():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 1, 3, 0, 2, 0)\n mode = \"replicate\"\n data_format = \"NDHWC\"\n res = np.array(\n [\n [\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = 
np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)", "def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)", "def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def pad_single_tensor(tensor):\n rank = len(tensor.shape)\n assert rank > 0\n padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))\n padded_shape = (batch_size,) + tuple(tensor.shape[1:])\n padded_tensor = array_ops.pad(tensor, padding)\n padded_tensor.set_shape(padded_shape)\n return padded_tensor", "def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)" ]
[ "0.79318434", "0.7797551", "0.76867455", "0.7518421", "0.7086973", "0.7018578", "0.69699883", "0.6949413", "0.68757856", "0.68674964", "0.68330324", "0.66925746", "0.6622649", "0.6594129", "0.6534619", "0.65186477", "0.6466588", "0.6418636", "0.63755435", "0.6347652", "0.6341581", "0.6329735", "0.6314614", "0.62864304", "0.61848587", "0.6132565", "0.6006758", "0.5979523", "0.5938675", "0.5917794" ]
0.7886506
1
Add a topic publisher for a joystick command.
def register_topic(self, name, command): topic_name = command['topic_name'] try: topic_type = self.get_interface_type(command['interface_type'], '.msg') self.pubs[topic_name] = self.create_publisher(topic_type, topic_name, 1) except JoyTeleopException as e: self.get_logger().error( 'could not register topic for command {}: {}'.format(name, str(e)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topic(phenny, input):\n if not input.admin:\n return\n text = input.group().split()\n topic = ' '.join(text[1:])\n if topic == '':\n return\n channel = input.sender\n phenny.write(['PRIVMSG', 'ChanServ'], 'TOPIC %s %s' % (input.sender, topic))\n return", "def topic(self, channel, topic=None):\n if topic:\n channel += ' :' + topic\n self.send_line('TOPIC %s' % channel)", "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def topic(self, topic):\n self.connection.topic(str(self), topic)", "def command(self, msg):\n self.cmd_pub.publish(msg)", "def _registerPublisher(self, callerId, topic, topicType, callerApi):\n if topic not in self.FilterPublishedTopic:\n self.__docWriter.addPub(callerId, topic, topicType)", "def AddTopic(self, topic_obj):\n self.topics.append(topic_obj)", "def add_topic ( topics , stream = -1 ) :\n return Ostap.Utils.AddTopic ( topics , level , stream )", "def subscribe_topic(self):\n req = {\n \"op\": \"subscribe\",\n \"args\": [\n \"instrument\",\n \"trade\",\n \"orderBook10\",\n \"execution\",\n \"order\",\n \"position\",\n \"margin\",\n ],\n }\n self.send_packet(req)", "def start_robot_publisher(self):\n print('Robot Pub Node started')\n\n #if GlobalSettings.USE_TEGA:\n msg_type = TegaAction\n msg_topic = ROSCORE_TO_TEGA_TOPIC\n #else:\n # msg_type = JiboAction\n # msg_topic = ROSCORE_TO_JIBO_TOPIC\n\n self.robot_commander = rospy.Publisher(msg_topic, msg_type, queue_size=10)\n rate = rospy.Rate(10) # spin at 10 Hz\n rate.sleep() # sleep to wait for subscribers", "def register_event_topic(DirectoryId=None, TopicName=None):\n pass", "def addTopic(self, topic):\n # Existing topics take priority\n if not self.helpTopics.containsKey(topic.__name__):\n self.helpTopics.put(topic.__name__, topic)", "def publishCmd(self, cmd):\n cmd_to_publish = Twist()\n cmd_to_publish.linear.x = cmd[0]\n cmd_to_publish.angular.z = cmd[1]\n self.cmd_pub.publish(cmd_to_publish)", "def publish(self, topic, value):\n msg = self.topics[topic]['msg']\n msg.data = value\n self.topics[topic]['publisher'].publish(msg)\n print(\"published \\t{} \\t{}\".format(topic, value))", "def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)", "async def publish(self, topic: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n acknowledge: bool = None,\n blackwhitelist: aiowamp.BlackWhiteList = None,\n exclude_me: bool = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> None:\n ...", "def publish_to_simulation(self, topic, message, **kwargs):\n pass", "def publishEvent(eventName,publisher, msg):", "def publish(self, topic:str, data:bytes) -> None:\n\t\tself.mqttClient.publish(topic, data)", "def publish_mqtt(self, topic, data={}, on_publish=None, on_response=None, inject_rid=True):\n payload = data\n\n # If this is a dict and we're allowed to inject a request ID, do so\n # Injecting a request ID allows the nodes to respond and us to execute callbacks\n if (type(data) is dict) and inject_rid:\n data['rid'] = str(shortuuid.uuid())\n\n # JSON encode dicts, lists and stuff\n if type(data) in [dict, list, tuple]:\n payload = json.dumps(data)\n\n result, mid = self.mqtt.publish(topic, payload, qos=1)\n\n if on_publish:\n self.publish_callbacks[mid] = on_publish\n\n if on_response and data and data.get('rid', None):\n self.response_callbacks[data['rid']] = on_response\n\n self.publishes.append(mid)\n\n while mid in self.publishes:\n 
self.wait()", "def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)", "def create_pubsub_topic(client, project, name):\n full_name = pubsub.topic_name(project, name)\n if client.get_topic(full_name):\n return\n\n client.create_topic(full_name)", "def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)", "def topic(self, msg):\n self.make_topic(msg, new_topic=msg.args[0])\n self.bot.log.info(\"Topic changed by \" + msg.user)", "def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.", "def Talk(self, topic, message):\n Send(self.channel, topic, message)", "def publish_event(self, topic):\n topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(topic, qos=2)\n logger.info(\"Event published on topic %s\", topic)", "def subscribe_to_ticks_publisher(topic):\n ConfigFile = \"../config/kuber.conf\"\n config = configparser.ConfigParser()\n config.read(ConfigFile)\n\n zmq_conf = config['ZMQ CONFIGURATION']\n publish_port = zmq_conf['publish_port']\n\n print(\"Subscribing to topic %s at %s\" % (topic, publish_port))\n sub = TopicSubscriber()\n\n try: \n sub.init(topic, publish_port)\n except Exception as e:\n print(\"\"\"\n Subscriber init failed: {}\n \"\"\".format(e))\n sys.exit(0)\n\n # Return the subscriber context.\n return sub", "def pub(self, topic, msg, callback=None):\n return self._pub('pub', topic, msg, callback=callback)", "def publish(self, message, topic=''):\n if type(message) != types.ListType:\n message = [message]\n if topic:\n message = [topic] + message\n self.send(message)" ]
[ "0.6031776", "0.5997131", "0.58900696", "0.58886224", "0.58743054", "0.58579695", "0.5785329", "0.57462335", "0.57402533", "0.57101375", "0.5690793", "0.5680169", "0.56232977", "0.55973136", "0.55766237", "0.5570244", "0.5550636", "0.5547172", "0.552251", "0.54776114", "0.54412764", "0.54315954", "0.54050964", "0.539491", "0.53931314", "0.53913087", "0.53894615", "0.5383846", "0.5379311", "0.5366901" ]
0.726427
0
Add an AsyncServiceProxy for a joystick command.
def register_service(self, name, command): service_name = command['service_name'] try: service_type = self.get_interface_type(command['interface_type'], '.srv') self.srv_clients[service_name] = self.AsyncServiceProxy( self, service_name, service_type) if service_name in self.offline_services: self.offline_services.remove(service_name) except JoyTeleopException: if service_name not in self.offline_services: self.offline_services.append(service_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_with_proxy(self, proxy: CommandProxy):\n return proxy.argument(*self.name_or_flags, **self.kwargs)", "def add_proxy(self, app: Flask, handle_errors: bool = True, auth: list = [\"\"]) -> Flask:\n raise NotImplemented('proxys are not yet supported')\n if hasattr(app, 'HOIST_INTERNALPROXY'):\n raise HoistExistsError('hoist is already set up on app')\n\n\n app.HOIST_INTERNALPROXY = HoistProxy(app, handle_errors)\n\n @app.route('/hoist/proxy/connect', methods=['POST'])\n def hoist_proxy_connect() -> str:\n return self.get_response(app, auth, app.HOIST_INTERNALPROXY._connect, 'data')\n\n @app.route('/hoist/proxy/disconnect', methods=['POST'])\n def hoist_proxy_disconnect() -> str:\n return self.get_response(app, auth, app.HOIST_INTERNALPROXY._disconnect, 'data')\n\n\n return app", "async def async_service_handler(service):\n api_command = MAP_SERVICE_API[service.service][0]\n data = service.data.copy()\n addon = data.pop(ATTR_ADDON, None)\n snapshot = data.pop(ATTR_SNAPSHOT, None)\n payload = None\n\n # Pass data to Opp.io API\n if service.service == SERVICE_ADDON_STDIN:\n payload = data[ATTR_INPUT]\n elif MAP_SERVICE_API[service.service][3]:\n payload = data\n\n # Call API\n try:\n await oppio.send_command(\n api_command.format(addon=addon, snapshot=snapshot),\n payload=payload,\n timeout=MAP_SERVICE_API[service.service][2],\n )\n except OppioAPIError as err:\n _LOGGER.error(\"Error on Opp.io API: %s\", err)", "def register_proxy(self, proxy):\n self.__proxy = proxy", "def add_service(self, service):\n self.app.add_service(service)", "def add_listener(self) -> None:\n client = self.discord_client\n\n try:\n add_listener = cast(\"Bot\", client).add_listener\n except AttributeError:\n wrap_client_listener(self.discord_client, self.on_socket_response)\n else:\n log.info(f\"Adding socket response listener to {client}\")\n add_listener(self.on_socket_response)", "async def async_setup_service(hass: HomeAssistant) -> None:\n\n async def send_text_command(call: ServiceCall) -> ServiceResponse:\n \"\"\"Send a text command to Google Assistant SDK.\"\"\"\n commands: list[str] = call.data[SERVICE_SEND_TEXT_COMMAND_FIELD_COMMAND]\n media_players: list[str] | None = call.data.get(\n SERVICE_SEND_TEXT_COMMAND_FIELD_MEDIA_PLAYER\n )\n command_response_list = await async_send_text_commands(\n hass, commands, media_players\n )\n if call.return_response:\n return {\n \"responses\": [\n dataclasses.asdict(command_response)\n for command_response in command_response_list\n ]\n }\n return None\n\n hass.services.async_register(\n DOMAIN,\n SERVICE_SEND_TEXT_COMMAND,\n send_text_command,\n schema=SERVICE_SEND_TEXT_COMMAND_SCHEMA,\n supports_response=SupportsResponse.OPTIONAL,\n )", "def add_service(self, zeroconf, service_type, name):\n self.pending.add(\n asyncio.ensure_future(self._internal_add(zeroconf, service_type, name))\n )", "async def _register_command(self) -> JSON:\n loop = asyncio.get_event_loop()\n async with aiohttp.ClientSession() as session:\n async with session.post(\n url=InteractionRoute().application(self._application_id).commands(self._id).url,\n json=self._data\n ) as response:\n interaction: JSON = await response.json(encoding='utf-8')\n return interaction", "def _add_services(self):\n this_service = {'name': 'swift-proxy'}\n other_services = [\n {'name': 'percona-cluster'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'swift-storage'}\n ]\n super(SwiftProxyBasicDeployment, self)._add_services(this_service,\n other_services)", "def connect(self, service, handler):\n 
self.partyline.setdefault(service, []).append(handler)", "def command(\n self,\n handler: Handler = None,\n *,\n name: str = None,\n aliases: Sequence[str] = (),\n help_text: str = None,\n ) -> CommandProxy:\n\n def inner(func: Handler) -> CommandProxy:\n kwargs = {\"aliases\": aliases}\n\n help_text_ = help_text or func.__doc__\n if help_text_:\n kwargs[\"help\"] = help_text_.strip()\n\n name_ = name or func.__name__\n if asyncio.iscoroutinefunction(func):\n proxy = AsyncCommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n else:\n proxy = CommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n\n self._add_handler(proxy, name_, aliases)\n\n return proxy\n\n return inner(handler) if handler else inner", "async def _create_proxy(self):\n self._proxy = await self._controller.fopen_tcp_proxy(\n Cellular._DRONE_WEB_API_PORT\n )\n\n self._drone_http_url = f\"http://{self._proxy.address}:{self._proxy.port}\"\n\n if self._autoconfigure and self._user_apc_token is None:\n self.logger.info(\"cellular auto pairing and configuration\")\n # generate a new anonymous user APC token and configure the cellular.\n self._fautoconfigure_with_new_token()", "def ev_joydeviceadded(self, event: tcod.event.JoystickDevice) -> T | None:", "def _add_proxy(self, proxy):\t\n\t\turi = proxy.uri\n\t\tif not proxy.connected:\n\t\t\traise TypeError('Worker {} is not available'.format(uri))\n\n\t\tif not proxy.is_worker:\n\t\t\tPrint('Proxy {} is not a compatible worker. You need to subclass Worker'.format(uri))\n\t\t\treturn\n\n\t\tprint('Worker {} is available'.format(uri))\n\t\tself._uris.add(uri)\n\t\tQueue._put(self, proxy)", "def add(self, service: AbstractService):\n self.services.append(service)", "def fusion_api_add_proxy_server(self, body, api=None, headers=None):\n return self.proxyserver.add(body, api, headers)", "def do_poortego_add(self, arg, opts=None):\n # Code moved to .command.add sub-module for easier reading/debugging \n poortego_add(self.my_interface, arg, opts)", "async def async_service_handler(service: ServiceCall) -> None:\n api_endpoint = MAP_SERVICE_API[service.service]\n\n data = service.data.copy()\n addon = data.pop(ATTR_ADDON, None)\n slug = data.pop(ATTR_SLUG, None)\n payload = None\n\n # Pass data to Hass.io API\n if service.service == SERVICE_ADDON_STDIN:\n payload = data[ATTR_INPUT]\n elif api_endpoint.pass_data:\n payload = data\n\n # Call API\n # The exceptions are logged properly in hassio.send_command\n with suppress(HassioAPIError):\n await hassio.send_command(\n api_endpoint.command.format(addon=addon, slug=slug),\n payload=payload,\n timeout=api_endpoint.timeout,\n )", "def add_service(torconfig, service, port=None):\n # picks a random port until it finds one avaible.\n while not service.tcp:\n port = port or new_port()\n try:\n service.tcp = reactor.listenTCP(port, service.factory)\n except error.CannotListenError:\n pass\n\n service.hs = txtorcon.HiddenService(\n torconfig, os.path.join(config.tor_data, service.name),\n ['%d 127.0.0.1:%d' % (service.port, port)])\n apaf.hiddenservices.append(service)", "def register_service(self, service, name):\n assert service._remote_service, \"Services should be decorated correctly.\"\n \n prepare_remote_service(service)\n self._services[name] = service", "def async_setup_platform(hass, config, async_add_devices, discovery_info=None):\n _LOGGER.debug('ROTEL : starting')\n rotel = RotelDevice(config.get(CONF_NAME), config.get(CONF_HOST), config.get(CONF_PORT), hass.loop)\n async_add_devices([rotel])\n\n 
coro = hass.loop.create_connection(RotelProtocol, config.get(CONF_HOST), config.get(CONF_PORT))\n futur = hasync.run_coroutine_threadsafe(coro, hass.loop)\n futur.add_done_callback(partial(bind_transport_to_device, rotel))", "def add_command(self, name, command_class, ns=None):\n ep = EntryPointWrapper(name, command_class)\n self.add_command_ep(ep, ns=ns)", "def _create_service_client(self, srv_name):\n if self._srv:\n self._srv.close()\n\n if srv_name in rosservice.get_service_list():\n rospy.loginfo(\"Creating proxy for service '%s'\" % srv_name)\n self._srv = rospy.ServiceProxy(srv_name, rosservice.get_service_class_by_name(srv_name))", "def pyga_joytask(self, task):\n for e in PYG.event.get():\n # joystick buttons up and down events routing\n # will send a string like, i.e.: \"joy0-button1\" for the button 1 of the joy 0 and 0 or 1 as a parameter for button up or down status respectively\n if e.type in [PYG.JOYBUTTONDOWN, PYG.JOYBUTTONUP]:\n s=\"joy%d-button%d\" % (e.joy, e.button)\n messenger.send(s, [1 if e.type == PYG.JOYBUTTONDOWN else 0])\n # joistick axis (analog and digital)\n # will send a string like, i.e.: \"joy0-axis1\" for the axis 1 of the joy 0 and a number between 0 and 1 or 0 and -1 as the stick or hat status (the digital stick returns 0 OR +-1 but analog sticks floating values from 0.0 and +-1.0)\n elif e.type == PYG.JOYAXISMOTION:\n s=\"joy%d-axis%d\" % (e.joy, e.axis)\n ###print \"Jax-%r(%r)\" % (e.axis, e.value)\n if e.axis in [1,2]:\n messenger.send(s, [-e.value])\n else:\n messenger.send(s, [e.value])\n return Task.cont", "async def _async_install_addon(self):\n try:\n await self.hass.components.hassio.async_install_addon(\"core_zwave\")\n finally:\n # Continue the flow after show progress when the task is done.\n self.hass.async_create_task(\n self.hass.config_entries.flow.async_configure(flow_id=self.flow_id)\n )", "async def __add_commands(self):\r\n commands_to_add: List[ClientCommandStructure] = [\r\n cmd for cmd in ChatCommandHandler.register.values()\r\n if cmd.app not in self._api_commands\r\n ]\r\n\r\n if commands_to_add:\r\n for cmd in commands_to_add:\r\n endpoint = f\"applications/{self.client.bot.id}\"\r\n\r\n if cmd.app.guild_id is not MISSING:\r\n endpoint += f\"/guilds/{cmd.app.guild_id}\"\r\n\r\n await self.client.http.post(\r\n endpoint + \"/commands\",\r\n cmd.app.to_dict()\r\n )", "def addService(self, interfaceClass: java.lang.Class, service: object) -> None:\n ...", "async def async_added_to_hass(self):\n\n self._undo_dispatcher = async_dispatcher_connect(\n self.hass,\n SIGNAL_TADO_UPDATE_RECEIVED.format(\"device\", self.device_id),\n self._async_update_callback,\n )\n self._async_update_device_data()", "async def register_rpc_proxies(self):\n for rpc_name in self.rpc_proxy_list:\n logger.debug('Registering RPC to Proxy: {}'.format(rpc_name))\n\n class RPCProxy:\n\n def __init__(self, local_session, rpc_name):\n self._local_session = local_session\n self._rpc_name = rpc_name\n\n async def __call__(self, *args, **kwargs):\n logger.debug('Proxying RPC {}, with args {}, kwargs {}'.format(self._rpc_name, args, kwargs))\n return await self._local_session.call(self._rpc_name, *args, **kwargs)\n\n await self.remote_session.register(RPCProxy(self.local_session, rpc_name), rpc_name)" ]
[ "0.53599304", "0.5337162", "0.52263916", "0.51141346", "0.51067567", "0.5030058", "0.5025323", "0.5012126", "0.49890283", "0.4984398", "0.49809358", "0.49488193", "0.49456048", "0.49326336", "0.49211985", "0.491806", "0.49023402", "0.48898253", "0.48897958", "0.4888669", "0.4872532", "0.48542297", "0.48525926", "0.48398727", "0.48368108", "0.48355135", "0.48295304", "0.47947475", "0.47894034", "0.47698483" ]
0.61700124
0
Returns lists for keeping track of free (outside of GCs) memory and naive B cells as well as a list of lists of B cells waiting for survival signals in each GC.
def new_lists():
    free_naives, free_memory = [], []
    GC_waiting = [[] for gc in range(cf.nGCs)]
    return free_naives, free_memory, GC_waiting
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cells_enter_GCs(GC_waiting, celllist, tnow, RIs):\n for cell in celllist:\n # get a random GC for entry\n GCpos = RIs.getR()\n # set entrytnow into the waiting area and new position\n cell.GCentrytime = tnow\n cell.AIDstart = tnow\n # add cell to correct waitlist\n GC_waiting[GCpos].append(cell)\n\n return GC_waiting", "def long_waiters_die(celllist, tnow):\n survivors = []\n for sublist in celllist:\n newsub = []\n for cell in sublist:\n if tnow - cell.GCentrytime <= cf.tlifeGC:\n newsub.append(cell)\n survivors.append(newsub)\n return survivors", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def get_available_memory_blocks(self):\n status = self.get_status()\n return status & (STATUS_MEM_0_EMPTY | STATUS_MEM_1_EMPTY)", "def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0", "def getMemoryBlocks(self) -> List[ghidra.program.model.mem.MemoryBlock]:\n ...", "def gc_blocks(seq, block_size):\n\n # Make all capital\n seq = seq.upper()\n iterations = len(seq) // block_size\n\n # Iterate through finding the GC content\n gc = []\n for i in range(iterations):\n block = seq[i*block_size:(i+1)*block_size]\n gc.append((block.count('G') + block.count('C')) / block_size)\n return tuple(gc)", "def BufferList(self) -> _n_2_t_0[_n_0_t_11[_n_0_t_6]]:", "def getBlocks(self) -> List[ghidra.program.model.mem.MemoryBlock]:\n ...", "def get_mem(self) -> list:\n return self.__mem", "def make_memory(RNs, seq_list, AgEpitope, tnow):\n ab = random.choice(seq_list)\n Emax = E_best(ab, AgEpitope)\n mutcount = np.round(RNs.getR() * 40)\n newcell = Bcell(sequence=ab, sequence0=ab, affinity=Emax, affinity0=Emax,\n origin='umem', mutations=mutcount,\n family=None, birthtime=tnow, GCentrytime=None,\n AIDstart=None, block=False)\n return newcell", "def get_greenlets(cls):\n return { obj for obj in gc.get_objects() if isinstance(obj, greenlet) and not obj.dead }", "def unallocated_spaces(self):\n unallocated_offices = 0\n for office in self.offices:\n unallocated_offices += self.offices[\n office]['room'].unallocated_spaces\n unallocated_living = 0\n for living in self.living_spaces:\n unallocated_living += self.living_spaces[\n living]['room'].unallocated_spaces\n\n return [unallocated_offices, unallocated_living]", "def get_all_objects():\n gc.collect()\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def memory(self):\n mem_size_list = []\n gig_size = self.random.randint(1,32)\n size = gig_size * 1073741824\n suffixes=['B','KB','MB','GB','TB']\n suffixIndex = 0\n while size > 1024 and suffixIndex < 4:\n suffixIndex += 1 #increment the index of the suffix\n size = size/1024.0 #apply the division\n mem_size_list.append(f\"{size:.2f} {suffixes[suffixIndex]}\")\n return mem_size_list", "def get_leaks(self):\n _run_garbage_collection()\n\n remaining_objects = self._get_all_tracked_objects()\n remaining_objects = self._remove_initial_objects_from_list(remaining_objects)\n\n return remaining_objects", "def get_claimable_cells(self) -> list[tuple[int, int]]:\n pass", "def get_blists(self):\n return self.blists[:]", "def mem(self) -> List[MemorySlot]:\n return self._mem_slots", "def get_free_sessions(self):\n return [session for session in self.sessions if not session.is_booked()]", "def compute_free_space(self, env, obj_mask):\n free = np.ones(obj_mask.shape, dtype=np.uint8)\n for obj_ids in 
env.obj_ids.values():\n for obj_id in obj_ids:\n free[obj_mask == obj_id] = 0\n return free", "def get_free_games(self) -> List[Game]:", "def calc():\n global last_free, iteration, pin\n mf = gc.mem_free()\n if False and last_free < mf:\n print(\">>> ran gc, iteration=\", iteration)\n last_free = mf\n # allocate memory\n x = \"abc\" + str(iteration)\n pin(not pin())\n sleep_ms(2)", "def get_pending_instances(self):\n return [instance for instance in self.instances.itervalues()\n if InstanceState.REQUESTED <= instance.state < InstanceState.RUNNING]", "def calculate_queues(self):\n\t\t#queues = [get_queue(lane) for lane in self.Vissim_Lanes]\n\t\t\n\t\tqueues = [0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\t\treturn queues", "def eligible_nodes(self):\n return [v for v in self.G if self.eligible_node(v)]", "def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()", "def get_waiting_jobs(self):\n return []", "def collect_nodes(self):\n free_nodes = Node.query.filter_by(project_id=None).all()\n return free_nodes", "def get_free(self):\r\n\t\treturn len(self.free_objects)" ]
[ "0.6278023", "0.6249889", "0.60423493", "0.5866494", "0.5843637", "0.57247835", "0.57123166", "0.5661632", "0.56212527", "0.55807775", "0.55428755", "0.5538133", "0.5531805", "0.5523965", "0.5468503", "0.54654175", "0.5453251", "0.54434454", "0.5362702", "0.5341909", "0.5341142", "0.53252906", "0.531662", "0.53148437", "0.5301672", "0.529585", "0.52952135", "0.5288661", "0.52835", "0.5265284" ]
0.7558097
0
Given an epitopic sequence, queries the objective distribution of the unselected binding repertoire and generates sequences according to this distribution. Incorporates a cutoff mechanism in case there are bins that have not been filled at least once after a given number of tries. Returns a list of Ab sequences whose energies comply with the objective binding energy distribution.
def make_shaped_repertoire(RNs):
    # get objective distribution
    bin_edges, obj_dist, volume = objective_distribution()
    # get an antigenic epitope sequence, and in case of nkey=1,2 check whether
    # it can populate all required bins, thus avoiding infinite loop below
    AgEpitope = get_AgEpitope(RNs)
    if cf.nkey == 1 or cf.nkey == 2:
        while 1:
            # get list of all possible binding partners and their energies
            all_partners = get_all_partners()
            all_energies = [E_best(partner, AgEpitope)
                            for partner in all_partners]
            # check whether all bins are occupiable with these energies,
            # if not, get new epitope sequence
            indices = np.digitize(all_energies, bin_edges, right=True)
            ind_set = set(indices)
            ind_set.discard(0)
            # if all bins can be occupied, move on
            if ind_set == set(range(1, len(bin_edges))):
                break
            # else get a new epitope and check its validity
            else:
                AgEpitope = get_AgEpitope(RNs)

    # initialise empty list for counting how many seqs have been found per bin
    ist_dist = np.zeros(len(obj_dist))
    # seq_list for collecting identified sequences
    seq_list = []
    E_list = []
    # while ist_dist and obj_dist are not equal, get new sequences and position
    # them if they are useful
    # introduce a tolerance of how far bins are allowed to deviate from the
    # goal, as otherwise runtime explodes due to very long waiting times for
    # high binding energy codes in large nkey cases - allow an absolute
    # deviation of volume*tolerance % for each bin.
    abs_tol = volume * 0.005
    while np.sum(np.abs((ist_dist-obj_dist)) > abs_tol) > 0:
        ab = Ab_seq(RNs)
        Emax = E_best(ab, AgEpitope)
        # find index bin of this energy
        indx = np.digitize(Emax, bin_edges, right=True)
        # if the index is in the useful range and the bin is not yet full,
        # count the sequence and store it
        if indx in range(1, len(bin_edges)):
            if obj_dist[indx-1] - ist_dist[indx-1] > 0:
                ist_dist[indx-1] += 1
                seq_list.append(ab)
                E_list.append(Emax)

    return seq_list, E_list, AgEpitope
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Ag_seq(RNs):\n seq = []\n for res in range(cf.lAg):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq", "def get_low_binder(RNs, AgEpitope, ntest):\n E_collect = []\n while len(E_collect) < ntest:\n ab = Ab_seq(RNs)\n Emax = E_best(ab, AgEpitope)\n if Emax >= cf.thr:\n E_collect.append(Emax)\n return min(E_collect)", "def Ab_seq(RNs):\n seq = []\n for res in range(cf.nkey):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq", "def generate_nonbinding_intervals(bed_df, genome_seq_records,\n rep_cutoff_type, rep_cutoff,\n min_dist, max_dist, step,\n num_intervals_per_side,\n empty_columns, output_fp):\n bf = BedFile(bed_df)\n df = pd.DataFrame(columns=['chr', 'start', 'end', 'id'])\n\n chrom_sizes_dict = {chrom: len(genome_seq_records[chrom])\n for chrom in genome_seq_records}\n\n for i, interval in enumerate(bf.intervals):\n print 'interval {} out of {}'.format(i + 1, len(bf.intervals))\n chrom = interval.chrom\n start = interval.start\n end = interval.end\n length = interval.length\n\n lower = max(0, start - max_dist)\n upper = max(0, start - min_dist)\n lefts = []\n while len(lefts) == 0:\n print '\\tsearching in ({}, {})'.format(lower, upper)\n lefts = generate_random_intervals(chrom, lower, upper, length,\n genome_seq_records,\n rep_cutoff_type, rep_cutoff,\n avoid_intervals=bf.intervals,\n n=num_intervals_per_side)\n lower = max(0, lower - step)\n upper = max(0, upper - step)\n if lower == upper:\n print 'reached chromosome boundary. cannot search next iteration'\n break\n for left in lefts:\n df.loc[len(df)] = [left.chrom, left.start, left.end, i]\n\n lower = min(chrom_sizes_dict[chrom], end + min_dist)\n upper = min(chrom_sizes_dict[chrom], end + max_dist)\n rights = []\n while len(rights) == 0:\n print '\\tsearching in ({}, {})'.format(lower, upper)\n rights = generate_random_intervals(chrom, lower, upper, length,\n genome_seq_records,\n rep_cutoff_type, rep_cutoff,\n avoid_intervals=bf.intervals,\n n=num_intervals_per_side)\n lower = max(0, lower + step)\n upper = max(0, upper + step)\n if lower == upper:\n print 'reached chromosome boundary. 
cannot search next iteration'\n break\n for right in rights:\n df.loc[len(df)] = [right.chrom, right.start, right.end, i]\n\n df['start'] = df['start'].astype(int)\n df['end'] = df['end'].astype(int)\n df['id'] = df['id'].astype(int)\n for col_name in empty_columns:\n df[col_name] = 0\n assert list(df.columns) == peak_file_cols\n df.to_csv(output_fp, sep='\\t', index=False)", "def Boltzchoice(LFnum, energylist, RNs):\n # transform list to energy values in kT according to experimental\n # affinities and the energy window allowed by the threshold\n energylist = cf.y0 + np.array(energylist) * cf.m\n # calculate norm of initial list\n Norm = sum([math.exp(-ener) for ener in energylist])\n # calculate initial probability vector\n probs = np.array([math.exp(-ener) / Norm for ener in energylist])\n # list to catch indices of selected cells\n selected = []\n # cells to be picked: determined by the lesser of #waiters and #LFs\n cellpick = min(len(energylist), LFnum)\n while len(selected) < cellpick:\n bins = np.cumsum(probs)\n ind = np.digitize(RNs.getR(), bins)\n selected.append(ind)\n # now, set the probability of the selected cell to 0 and renormalise\n # the remaining probability vector\n newNorm = Norm - math.exp(-energylist[ind])\n probs[ind] = 0\n probs = probs * Norm / newNorm\n Norm = newNorm\n\n return selected", "def gene_finder(dna):\n orfs = find_all_ORFs_both_strands(dna)\n print(orfs)\n threshold = longest_ORF_noncoding(dna, 1000)\n print('threshold is', threshold)\n print('number of orfs:', len(orfs))\n aa_sequences = []\n i = 0\n while i < len(orfs):\n print(len(orfs[i]))\n if len(orfs[i]) > threshold:\n print('if')\n aa_sequences += [coding_strand_to_AA(orfs[i])]\n i += 1\n print(aa_sequences)", "def correct_intron_boundaries_by_exonsignal(abfgp_genemodel,organism,PCG,introndata,\n array_algpresence,array_algsimilarity,verbose=True):\n\n # Global Variable Imports\n CORRECT_BY_EXONSIGNAL_ACCEPTOR_NT_OFFSET = 200\n CORRECT_BY_EXONSIGNAL_DONOR_NT_OFFSET = 200\n FINETUNE_ACCEPTOR_NT_OFFSET = 12\n FINETUNE_DONOR_NT_OFFSET = 12\n from settings.genestructure import MIN_INTRON_NT_LENGTH\n\n\n # list with adjusted boundaries\n refined_boundaries = []\n\n # recreate lists of ABGFP exons & introns\n abfgp_exons = [ abfgp_genemodel[pos] for pos in range(0,len(abfgp_genemodel),2) ]\n abfgp_introns = [ abfgp_genemodel[pos] for pos in range(1,len(abfgp_genemodel),2) ]\n\n for intron_pos in range(0,len(abfgp_introns)):\n intron = abfgp_introns[intron_pos]\n if not intron: continue\n if intron.__class__.__name__ == 'SequenceErrorConnectingOrfs': continue\n\n next_exon = abfgp_exons[intron_pos+1]\n prev_exon = abfgp_exons[intron_pos]\n\n # make elegiable correction range\n correct_acceptor_range = range(\n intron.acceptor.pos-CORRECT_BY_EXONSIGNAL_ACCEPTOR_NT_OFFSET,\n min([ next_exon.donor.pos-6, intron.acceptor.pos+CORRECT_BY_EXONSIGNAL_ACCEPTOR_NT_OFFSET+1 ]) )\n correct_donor_range = range(\n max([ prev_exon.acceptor.pos+6, intron.donor.pos-CORRECT_BY_EXONSIGNAL_DONOR_NT_OFFSET]),\n intron.donor.pos+CORRECT_BY_EXONSIGNAL_DONOR_NT_OFFSET+1)\n finetune_acceptor_range = range(intron.acceptor.pos-FINETUNE_ACCEPTOR_NT_OFFSET,\n intron.acceptor.pos+FINETUNE_ACCEPTOR_NT_OFFSET+1)\n finetune_donor_range = range(intron.donor.pos-FINETUNE_DONOR_NT_OFFSET,\n intron.donor.pos+FINETUNE_DONOR_NT_OFFSET+1)\n\n\n staI,endI = intron.donor.pos/3, intron.acceptor.pos/3\n staE,endE = intron.acceptor.pos/3, next_exon.donor.pos/3\n algsim_intron = sum(array_algsimilarity[staI:endI])\n algsim_next_exon = 
sum(array_algsimilarity[staE:endE])\n algsim_score_abs = float(algsim_intron)/algsim_next_exon\n algsim_score_rel = float(algsim_intron)/algsim_next_exon * (float(endE-staE)/float(endI-staI))\n\n print \"CORRECT\", intron\n print intron.acceptor, intron.acceptor.pos, algsim_score_abs, algsim_score_rel\n print [ a.pos for a in intron.orfAcceptor._acceptor_sites ]\n\n\n possible_acceptor_boundaries = []\n possible_donor_boundaries = []\n\n for acceptor in intron.orfAcceptor._acceptor_sites:\n if acceptor.pos - intron.donor.pos < MIN_INTRON_NT_LENGTH: continue\n if acceptor.pos not in correct_acceptor_range: continue\n if acceptor.pos == intron.acceptor.pos: continue\n if _finetune_splicesite_comparison(intron.acceptor,acceptor) == False: continue\n\n # if here, generate stats\n staI,endI = intron.donor.pos/3, acceptor.pos/3\n algsim_intron_pos = sum(array_algsimilarity[staI:endI])\n staE,endE = acceptor.pos/3, next_exon.donor.pos/3\n algsim_next_exon_pos = sum(array_algsimilarity[staE:endE]) \n # check if no similarity left of exon; avoid ZeroDivisionError\n if algsim_next_exon_pos == 0: continue\n\n # calculate score ratios\n alternative_algsim_score_abs = float(algsim_intron_pos)/algsim_next_exon_pos\n alternative_algsim_score_rel = alternative_algsim_score_abs * (float(endE-staE)/float(endI-staI))\n\n # test if exon signal is an improvement\n #if alternative_algsim_score_abs > algsim_score_abs: continue\n #if alternative_algsim_score_rel > algsim_score_rel: continue\n\n\n if acceptor.phase == intron.donor.phase:\n # get data on this alternative acceptor/donor combination\n test_intron = IntronConnectingOrfs(intron.donor,acceptor,None,intron.orfDonor,intron.orfAcceptor)\n test_intron.assign_bp_and_ppts()\n else:\n test_intron = None\n\n print acceptor, acceptor.pos, \n print alternative_algsim_score_abs,\n print alternative_algsim_score_rel,\n if test_intron:\n print _branchpoint_comparison(intron,test_intron),\n print _polypyrimidinetract_comparison(intron,test_intron)\n else:\n print \"OTHER PHASE\"\n\n\n staI,endI = intron.donor.pos/3, intron.acceptor.pos/3\n staE,endE = prev_exon.acceptor.pos/3, intron.donor.pos/3, \n algsim_intron = sum(array_algsimilarity[staI:endI])\n algsim_prev_exon = sum(array_algsimilarity[staE:endE])\n algsim_score_abs = float(algsim_intron)/algsim_prev_exon\n algsim_score_rel = float(algsim_intron)/algsim_prev_exon * (float(endE-staE)/float(endI-staI))\n\n print intron.donor, intron.donor.pos, algsim_score_abs, algsim_score_rel\n print [ d.pos for d in intron.orfDonor._donor_sites ]\n\n for donor in intron.orfDonor._donor_sites:\n if intron.acceptor.pos - donor.pos < MIN_INTRON_NT_LENGTH: continue\n if donor.pos == intron.donor.pos: continue\n if donor.pos not in correct_donor_range: continue\n if _finetune_splicesite_comparison(intron.donor,donor) == False: continue\n\n # if here, generate stats\n staI,endI = donor.pos/3, intron.acceptor.pos/3\n algsim_intron_pos = sum(array_algsimilarity[staI:endI])\n staE,endE = prev_exon.acceptor.pos/3, donor.pos/3\n algsim_prev_exon_pos = sum(array_algsimilarity[staE:endE]) \n # check if no similarity left of exon; avoid ZeroDivisionError\n if algsim_prev_exon_pos == 0: continue\n\n # calculate score ratios\n alternative_algsim_score_abs = float(algsim_intron_pos)/algsim_prev_exon_pos\n alternative_algsim_score_rel = alternative_algsim_score_abs * (float(endE-staE)/float(endI-staI))\n\n # test if exon signal is an improvement\n #if alternative_algsim_score_abs > algsim_score_abs: continue\n #if 
alternative_algsim_score_rel > algsim_score_rel: continue\n\n if donor.phase == intron.acceptor.phase:\n # get data on this alternative acceptor/donor combination\n test_intron = IntronConnectingOrfs(donor,intron.acceptor,None,intron.orfDonor,intron.orfAcceptor)\n test_intron.assign_bp_and_ppts()\n else:\n test_intron = None\n\n print donor, donor.pos, \n print alternative_algsim_score_abs,\n print alternative_algsim_score_rel,\n if test_intron:\n print _branchpoint_comparison(intron,test_intron),\n print _polypyrimidinetract_comparison(intron,test_intron)\n else:\n print \"OTHER PHASE\"\n\n\n #for position in range(intron.acceptor.pos,intron.orfAcceptor.start,-10):\n # staI,endI = intron.donor.pos/3, position/3\n # if endI <= staI: break\n # algsim_intron_pos = sum(array_algsimilarity[staI:endI])\n # staE,endE = position/3, next_exon.donor.pos/3\n # algsim_next_exon_pos = sum(array_algsimilarity[staE:endE]) \n # print position, scoreA, scoreB,\n # print float(algsim_intron_pos)/algsim_next_exon_pos,\n # print float(algsim_intron_pos)/algsim_next_exon_pos * (float(endE-staE)/float(endI-staI))", "def evolve(self):\n self.generation = 0\n start_time = time.time()\n\n # while the termination criteria is not satisfied, makes another generation\n while not self.termination_criteria.satisfied(self.generation, time.time()-start_time, self.population):\n self.generation += 1\n #print str(self.generation)\n next_generation = []\n\n if self.elitism:\n # Keeps the 10% best individuals\n best_individuals = heapq.nsmallest(int(0.1*self.population_size), self.population, lambda individual: individual.get_fitness())\n next_generation += copy.deepcopy(best_individuals)\n\n # select genetic operation probabilistically\n # this is a roulette wheel selection\n operations = numpy.random.choice(['reproduction', 'crossover', 'mutation'], size=self.population_size, p=[self.reproduction, self.crossover, self.mutation]).tolist()\n individuals = numpy.random.choice(self.population, p=self.normalized_fitness, size=2*self.population_size, replace=True).tolist()\n\n while len(next_generation) < self.population_size:\n operation = operations.pop()\n individual = individuals.pop()\n individual.get_fitness() # enforce fitness calculation\n\n if operation == 'reproduction':\n next_generation.append(individual)\n elif operation == 'crossover':\n individual2 = individuals.pop()\n individual2.get_fitness() # enforce fitness calculation\n individual1, individual2 = individual.crossover(individual2)\n next_generation.append(individual1)\n next_generation.append(individual2)\n elif operation == 'mutation':\n individual1 = individual.mutate()\n next_generation.append(individual1)\n\n self.population = next_generation\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n\n mean = numpy.mean(self.population_fitness)\n std = numpy.std(self.population_fitness)\n min = self.population_fitness.min()\n\n info_mean = pandas.DataFrame([[self.generation, mean, min, std]], columns=[\"generation\", \"mean\", \"min\", \"std\"])\n self.generation_info = self.generation_info.append(info_mean, ignore_index=True)", "def gene_finder(dna):\n threshold = 
longest_ORF_noncoding(dna, 1500)\n l = []\n for i in find_all_ORFs_both_strands(dna):\n \tif len(i)>=threshold:\n \t\tl.append(coding_strand_to_AA(i))\n print l\n return l", "def solve(self):\n\n n = 0\n\n # Keep iterating while incrementing the allowed combination length until\n # a combination that can accommodate the defined load has been found.\n while True:\n n += 1\n combos = self.fit_n_bins(n=n)\n if combos:\n return combos", "def get_filtered_probes(seqdf, escores, models, mutate_cutoff, mutate_gap,\n egaps, thresholds, proteins, colors,\n generate_plots=False, spcomb=[(0, 0)], analysis_path=\"\",\n mode=\"custom\", predict_flanks=True, flank_len=0,\n key_colname=\"key\",\n show_model_flanks=False, get_complete_mutated=True,\n primer=\"\", max_mutate_count=2):\n filtered_probes = []\n # iterate through each site num and peak len combination\n for comb in spcomb:\n # get escore and model predictions for each protein\n es_preds = {}\n esplots = {}\n model_preds = {}\n model_plots = {}\n sitenum = comb[0]\n peaklen = comb[1]\n\n # get rows with the current sitenum and peaklen if specified\n if sitenum != 0 and peaklen != 0:\n df = seqdf.loc[(seqdf[\"sites_in_peak\"] == sitenum) & (seqdf[\"peaklen\"] == peaklen)]\n # otherwise use all rows\n else:\n df = seqdf\n # initialize escore and model objects for each protein\n for protein in proteins:\n protein_num = proteins.index(protein)\n es_preds[protein] = escores[protein].predict_sequences(df, key_colname=key_colname)\n esplots[protein] = escores[protein].make_plot_data(es_preds[protein], color=colors[protein_num][0])\n\n model_preds[protein] = models[protein].predict_sequences(df,\n key_colname=key_colname,\n predict_flanks=predict_flanks,\n flank_len=flank_len)\n model_plots[protein] = models[protein].make_plot_data(model_preds[protein],\n color=colors[protein_num][1],\n show_model_flanks=show_model_flanks)\n\n # Generate plots\n if generate_plots:\n sp = SitesPlotter()\n # if need to plot, uncomment this\n sp.plot_seq_combine([esplots, model_plots],\n filepath=\"%s/sitesplot_d%d_p%d.pdf\" %\n (analysis_path, sitenum, peaklen))\n\n # get filtered sequences\n filtered_seqs = {}\n flanks = {}\n print(\"Site filtering...\")\n print(\"Number of sites before mutating:\", len(es_preds[proteins[0]]))\n\n # get sequences with 2 significant binding sites\n sites_mutated = 0\n sites_removed = 0\n failed_mutations = 0\n for key in es_preds[proteins[0]]:\n curr_es_preds = {}\n curr_model_preds = {}\n for protein in proteins:\n curr_es_preds[protein] = es_preds[protein][key]\n curr_model_preds[protein] = model_preds[protein][key]\n #print(key,\"asd\",curr_model_preds[\"ets1\"])\n bs = Sequence(curr_es_preds, curr_model_preds, proteins=proteins,\n escore_cutoff=mutate_cutoff, escore_gap=mutate_gap,\n pbmescores=escores)\n ### print(key, bs.is_valid())\n if bs.is_valid():\n filtered_seqs[key] = bs\n # TODO: move all print statements to a log file\n # print(\"Number of sites mutated:\", sites_mutated)\n # print(\"Number of failed mutations:\", failed_mutations)\n # print(\"Number of sites removed:\", sites_removed)\n print(\"Number of sites after filtering:\", len(filtered_seqs))\n\n print(\"Creating m1,m2,m3 sequences...\")\n # for each of the filtered sequence, create m1,m2,m3 sequences\n seqdict = {}\n funcdict = {}\n for key in filtered_seqs:\n # Visualization part\n seqdict[\"%s-wt\" % key] = filtered_seqs[key].sequence\n # current binding site object\n bs = filtered_seqs[key]\n # get m1,m2,m3 for each wt\n for idx, mut in enumerate([[0], [1], 
[0, 1]]):\n # here we mutate on the first, second, and both sites\n # mut is the index of the site to abolish\n to_remove = bs.remove_pos(mut)\n mutseq = bs.abolish_sites(to_remove, mode=\"to_eliminate\",\n escore_threshold=mutate_cutoff)\n seqdict[\"%s-m%d\" % (key, idx + 1)] = mutseq.sequence\n funcdict[\"%s-m%d\" % (key, idx + 1)] = mutseq.plot_functions\n\n # get sequences that pass given escore gap and threshold combination\n for e in list(itertools.product(egaps, thresholds)):\n egapthres = e[0]\n ecutoff = e[1]\n\n # check that wt, m1, m2, m3 are valid\n if coopfilter.check_all_seqs(seqdict[\"%s-wt\" % key],\n seqdict[\"%s-m1\" % key],\n seqdict[\"%s-m2\" % key],\n seqdict[\"%s-m3\" % key],\n filtered_seqs[key].get_sites_dict(),\n escores,\n escore_cutoff=ecutoff,\n escore_gap=egapthres,\n get_complete_mutated=get_complete_mutated):\n bsites_dict = filtered_seqs[key].get_sites_dict()\n lst = [seqdict[\"%s-wt\" % key], seqdict[\"%s-m1\" % key], seqdict[\"%s-m2\" % key],\n seqdict[\"%s-m3\" % key]]\n lst, successful = clean_junctions(seqlst=lst,\n proteins=proteins,\n escores=escores,\n models=models,\n mutate_cutoff=mutate_cutoff,\n mutate_gap=mutate_gap,\n primer=\"GTCTTGATTCGCTTGACGCTGCTG\",\n max_mutate_count=max_mutate_count)\n if successful:\n # replace seqdict with the new sequences\n seqdict[\"%s-wt\" % key] = lst[0]\n seqdict[\"%s-m1\" % key] = lst[1]\n seqdict[\"%s-m2\" % key] = lst[2]\n seqdict[\"%s-m3\" % key] = lst[3]\n filtered_probes.append({\"key\": key,\n \"wt\": seqdict[\"%s-wt\" % key],\n \"m1\": seqdict[\"%s-m1\" % key],\n \"m2\": seqdict[\"%s-m2\" % key],\n \"m3\": seqdict[\"%s-m3\" % key],\n \"tf1\": bsites_dict[\"protein_1\"],\n \"tf2\": bsites_dict[\"protein_2\"],\n \"core1_start\": bsites_dict[\"core_start_1\"],\n \"core1_mid\": bsites_dict[\"core_mid_1\"],\n \"core1_end\": bsites_dict[\"core_end_1\"],\n \"core1_pref\": bsites_dict[\"score_1\"],\n \"core2_start\": bsites_dict[\"core_start_2\"],\n \"core2_mid\": bsites_dict[\"core_mid_2\"],\n \"core2_end\": bsites_dict[\"core_end_2\"],\n \"core2_pref\": bsites_dict[\"score_2\"],\n \"ecutoff\": ecutoff,\n \"egapthres\": egapthres,\n \"distance\": filtered_seqs[key].get_sites_dist(),\n \"sites_in_peak\": sitenum,\n \"peak_length\": peaklen\n })\n break # the sequence passes the filtering check, so stop\n\n # generate plots of wt, m1, m2, m3\n if generate_plots:\n filtered_es_preds = {}\n filtered_esplots = {}\n filtered_model_preds = {}\n filtered_model_plots = {}\n for protein in proteins:\n protein_num = proteins.index(protein)\n filtered_es_preds[protein] = escores[protein].predict_sequences(seqdict, key_colname=\"key\")\n filtered_esplots[protein] = escores[protein].make_plot_data(filtered_es_preds[protein], color=colors[protein_num][0])\n\n filtered_model_preds[protein] = models[protein].predict_sequences(seqdict,\n key_colname=\"key\",\n predict_flanks=predict_flanks)\n filtered_model_plots[protein] = models[protein].make_plot_data(filtered_model_preds[protein],\n color=colors[protein_num][1],\n show_model_flanks=show_model_flanks)\n sp.plot_seq_combine([filtered_esplots, filtered_model_plots],\n filepath=\"%splot_%s_d%d_p%d.pdf\" % (analysis_path, mode, sitenum, peaklen))\n\n return filtered_probes", "def ffa(items_list, bin_capacity):\n bins =[]\n randomised_np_list = np.random.permutation(items_list) # list containing initial items in a random order\n items_list = randomised_np_list.tolist() \n \n for item in items_list:\n # foeach item we search if there's an open bin where it can fit\n for bin in 
bins:\n if bin.total_weight + item <= bin_capacity: #if it fits\n bin.add_item(item) #we add the item in the bin\n break\n else:\n # there is no open bin where the item can fit\n #so we open a new bin and add the item in it\n bin = Bin()\n bin.add_item(item)\n bins.append(bin)\n\n return bins", "def performChipSeq( sequences=[], spEnergies=[], numCells=100000, depth=100, ampRatio=1000, pExt=1.0, pcrCycles=15, bgEnergy=1, chemicalPotential=0, secondTFspEnergies=[], secondTFchemicalPotential=0, chromAccessibility=[], secondTFintEnergies=[], indirectLocations=[], indirectSequences=[], numFPlocations=0 ):\n N = len( spEnergies )\n pExtChip = pExt\n pExtControl = pExt\n\n numChipReads = N * depth\n numControlReads = N * depth\n\n bgEnergy = makeArray( bgEnergy, N )\n pAmp = np.round( np.power(ampRatio,1.0/pcrCycles) - 1, 2 )\n pAmp = np.maximum( 0.01, pAmp )\n pAmp = np.minimum( pAmp, 0.99 )\n\n table = gbt.GenomeBindingTable( sequences, spEnergies,\n bgEnergy, chemicalPotential, numCells,\n secondTFspEnergies=secondTFspEnergies,\n secondTFchemicalPotential=secondTFchemicalPotential,\n secondTFintEnergies=secondTFintEnergies,\n indirectLocations=indirectLocations,\n chromAccessibility=chromAccessibility )\n\n pExtControl = makeArray( pExtControl, N )\n pExtChip = makeArray( pExtChip, N )\n fragExtract = Frag.FragExtract( pExtControl, pExtChip, table )\n pAmp = makeArray( pAmp, N )\n pcrObj = ChipSeq.PCR( pcrCycles, pAmp )\n\n chipSeq = ChipSeq.ChipSeq( table, fragExtract, pcrObj, \n nControlReads=numControlReads,\n nChipReads=numChipReads )\n \n genome = table.locations.merge( chipSeq.readsTable )\n genome = genome.merge( chipSeq.amplifiedTable )\n genome = genome.merge( fragExtract.extractedTable )\n genome.loc[:,'ratio'] = genome.eval('unique_chip_reads/unique_control_reads')\n\n if numFPlocations > 0:\n genome.loc[:,'ratio'] = genome.eval('unique_chip_reads/unique_control_reads')\n genome.loc[(N-numFPlocations):,'binding'] = 'false-positive' \n\n return genome", "def probability_distribution(self):\n if not len(self.arms):\n return []\n\n possibility_of_pulling_others = self.epsilon / len(self.arms)\n possibility_of_pulling_best_arm = 1 - self.epsilon + possibility_of_pulling_others\n\n def arm_probability(arm):\n if arm != self.best_arm:\n return possibility_of_pulling_others\n else:\n return possibility_of_pulling_best_arm\n\n return [arm_probability(arm) for arm in self.arms]", "def gene_finder(dna):\n threshold = longest_ORF_noncoding(dna, 1500)\n orfs = find_all_ORFs_both_strands(dna)\n list1 = []\n for orf in orfs:\n if len(orf) > threshold:\n list1.append(coding_strand_to_AA(orf))\n return list1", "def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes", "def finetune_acceptor_boundaries(abfgp_genemodel,introndata,\n array_algpresence,array_algsimilarity,verbose=True):\n\n # Global Variable Imports\n from settings.genestructure import MIN_INTRON_NT_LENGTH\n FINETUNE_ACCEPTOR_NT_OFFSET = 18\n\n # list with adjusted boundaries\n refined_boundaries = []\n\n # recreate lists of ABGFP exons & introns\n abfgp_exons = [ abfgp_genemodel[pos] for pos in range(0,len(abfgp_genemodel),2) ]\n abfgp_introns = [ abfgp_genemodel[pos] for pos in range(1,len(abfgp_genemodel),2) ]\n\n for intron_pos in range(0,len(abfgp_introns)):\n intron = abfgp_introns[intron_pos]\n if not intron: continue\n if intron.__class__.__name__ == 'SequenceErrorConnectingOrfs': continue\n\n # assign branchpoint in current 
intron\n intron.assign_bp_and_ppts()\n\n has_been_printed = False\n\n # list of alternatives & associated scores\n alternatives = []\n finetune_range = range(intron.acceptor.pos-FINETUNE_ACCEPTOR_NT_OFFSET,\n intron.acceptor.pos+FINETUNE_ACCEPTOR_NT_OFFSET+1,3)\n\n for acceptor in intron.orfAcceptor._acceptor_sites:\n if acceptor.pos != intron.acceptor.pos and\\\n acceptor.pos in finetune_range:\n # get the next exon (3'of this intron)\n next_exon = abfgp_exons[intron_pos+1]\n if not has_been_printed:\n has_been_printed = True\n ############################################################\n if verbose: print \"FINETUNING ACCEPTOR::\", intron\n ############################################################\n\n # get data on this alternative acceptor position\n test_intron = IntronConnectingOrfs(intron.donor,acceptor,None,intron.orfDonor,intron.orfAcceptor)\n test_intron.assign_bp_and_ppts()\n\n # test if refinement will result in a long enough intron\n if test_intron.length < MIN_INTRON_NT_LENGTH: continue\n\n scorelist = []\n # score 1: is acceptor.pssm_score `higher`?\n scorelist.append( _finetune_splicesite_comparison(intron.acceptor,test_intron.acceptor) )\n # score 2: branchpoint comparison?\n scorelist.append( _branchpoint_comparison(intron,test_intron) )\n # score 3: ppt comparison?\n scorelist.append( _polypyrimidinetract_comparison(intron,test_intron) )\n # score 4: is algsimilarity ratio increased (==better)?\n scorelist.append( _algsimilarity_comparison(intron,test_intron,None,next_exon,array_algsimilarity) )\n\n # evaluate scorelist; improved intron boundary or not?\n # use acceptor, branchpoint & ppt, do *NOT* use algsim score\n if scorelist[0:3].count(False) == 0 and scorelist[0:3].count(True) >= 1:\n alternatives.append( ( acceptor, scorelist ) )\n is_accepted = True\n else:\n is_accepted = False\n\n ################################################################\n if verbose:\n print \"alternative:\", acceptor,\n print intron.acceptor.pos - acceptor.pos, scorelist,\n print is_accepted, \"BP:\",\n print intron.get_branchpoint_nt_distance(),\n print \"alt:\",\n print test_intron.get_branchpoint_nt_distance()\n ################################################################\n\n # now evaluate the alternatived and take the best one\n if not alternatives:\n continue\n elif len(alternatives) == 1:\n refined_boundaries.append( ( intron.acceptor, alternatives[0][0] ) )\n else:\n # multiple! again, take the *best* one\n pass\n\n # return list of refined_boundaries\n return refined_boundaries", "def cutoff_frequencies(n_e, B, gamma=1.):\n m_e = gamma * cgs.me\n m_i = cgs.mp\n n_i = n_e\n\n om_pe = omega_plasma(n_e, m_e)\n om_pi = omega_plasma(n_i, m_i)\n om_ce = omega_cyclotron(-1, B, m_e)\n om_ci = omega_cyclotron(+1, B, m_i)\n\n cutoffs = [np.sqrt(om_pe**2 + om_pi**2)] # P = 0 cutoff is trivial.\n\n A = 1.\n B = -(om_ce + om_ci) # this is the L = 0 cutoff; we're destroying the magnetic field variable\n C = om_ce * om_ci - om_pe**2 - om_pi**2\n\n if 4 * A * C > B**2:\n return np.array(cutoffs) # no other valid solutions.\n\n # Between R and L and the +/- in the quadratic equations, there are four\n # possible solutions, two of which are negations of the other, so there\n # are always two nonnegative solutions. If RHS == 0 they're the same\n # number, though. `B` and `RHS` as we've defined them are always\n # nonnegative.\n\n prefactor = 1. 
/ (2 * A)\n rhs = np.sqrt(B**2 - 4 * A * C)\n\n cutoffs.append(prefactor * (B + rhs))\n\n if rhs != 0.:\n if rhs > B:\n cutoffs.append(prefactor * (rhs - B))\n else:\n cutoffs.append(prefactor * (B - rhs))\n\n return np.array(sorted(cutoffs))", "def generate(self):\n\n minEig = -1\n attempts = 0\n\n while minEig <= 0:\n p = np.zeros((self.n_combi, self.signum))\n sigma_signals = np.zeros((self.n_combi, self.signum))\n\n corr_samples = []\n for j in range(self.corrnum):\n t = random.sample(list(range(self.n_sets)), int(self.tot_corr[j]))\n corr_samples.append(t)\n t1 = ismember(self.x_corrs, t)\n t2 = t1.sum(axis=1) == 2\n\n temp = self.corr_means[j] + self.corr_std[j] * np.random.randn(t2.sum(), 1)\n corr_arr = [0] * len(t2)\n idx = 0\n for k in range(len(t2)):\n if t2[k] == 1:\n p[k, j] = max(min(temp[idx], 1), 0)\n sigma_signals[k, j] = self.sigmad\n idx += 1\n else:\n p[k, j] = 0\n sigma_signals[k, j] = self.sigmaf\n\n if self.corrnum < self.signum:\n sigma_signals[:, self.corrnum: self.signum] = self.sigmaf * np.ones(\n (self.n_combi, self.signum - self.corrnum))\n # minEig = 1\n\n R = self.generateBlockCorrelationMatrix(sigma_signals, p)\n\n attempts += 1\n e, ev = np.linalg.eig(self.R)\n minEig = np.min(e)\n if attempts > self.maxIters and minEig < 0:\n raise Exception(\"A positive definite correlation matrix could not be found with prescribed correlation \"\n \"structure. Try providing a different correlation structure or reducing the standard \"\n \"deviation\")\n\n return p, sigma_signals, R", "def generate_humanization_cuts(molecule, spots, database = []): \n # Convert amino acids names to number\n positions = []\n indexNameDict = {}\n for i, residue in enumerate(molecule):\n if residue.name in spots:\n positions.append(i)\n indexNameDict[i] = residue.name\n length = len(molecule)\n positionNum = len(positions)\n #print length, positionNum \n allowedNum = 3\n \n if not positionNum-1 in range(allowedNum):\n text = \"The humanization of the sequences only allows no more than 3 residues mutated at one time\"\n raise DeimmunizationError(text)\n \n cuts= []\n \n if positionNum == 1:\n position = positions[0]\n #print \"Position in generate_humanization_cuts function\", position\n if position < 0 or position > length:\n text = \" The provided residue position is out of the molecule residues range\"\n raise DeimmunizationError(text)\n begin, end = determine_begin_end(molecule, position)\n seqs = extract_all_sequences(molecule, begin, end)\n #print \"seqs: \", len(seqs)\n #count = count_total_mutations(seqs, database)\n count = count_total_mutations_cpp(seqs)\n #print \"count: \", count\n iteraction = 0\n for aa in aminoAcids[\"PDB\"]:\n molecule_mut = molecule.duplicate()\n molecule_mut[position].kind = aa\n seqs_mut = extract_all_sequences(molecule_mut, begin, end)\n #mutCount = count_total_mutations(seqs_mut, database)\n mutCount = count_total_mutations_cpp(seqs_mut)\n #print \"seqs_mut: \", len(seqs_mut)\n #print \"mutCount: \", mutCount\n #print iteration\n iteration += 1\n if mutCount > count:\n solution = {}\n solution[indexNameDict[position]] = aa\n cuts.append(solution) \n\n elif positionNum == 2:\n position1 = positions[0]\n position2 = positions[1]\n if position1 < 0 or position1 > length or position2 < 0 or position2 > length:\n text = \" The provided residue position is out of the molecule residues range\"\n raise DeimmunizationError(text)\n if position1 > position2 :\n position1, position2 = position2, position1\n begin_all, end1 = determine_begin_end(molecule, position1)\n 
begin2, end_all = determine_begin_end(molecule, position2)\n seqs = extract_all_sequences(molecule, begin_all, end_all)\n #count = count_total_mutations(seqs, database)\n count = count_total_mutations_cpp(seqs)\n #print \"seqs: \", len(seqs)\n #print \"count: \", count\n iteration = 0\n for aa1 in aminoAcids[\"PDB\"]:\n molecule1 = molecule.duplicate()\n molecule1[position1].kind = aa1\n for aa2 in aminoAcids[\"PDB\"]:\n molecule2 = molecule1.duplicate()\n molecule2[position2].kind = aa2\n begin_mut_all, end_mut1 = determine_begin_end(molecule, position1)\n begin_mut2, end_mut_all = determine_begin_end(molecule, position2)\n seqs_mut = extract_all_sequences(molecule2, begin_mut_all, end_mut_all)\n #mutCount = count_total_mutations(seqs_mut, database)\n mutCount = count_total_mutations_cpp(seqs_mut)\n #print \"seqs_mut: \", len(seqs_mut)\n #print \"mutCount: \", mutCount\n #print iteration\n iteration += 1\n if mutCount > count:\n solution = {}\n solution[indexNameDict[position1]] = aa1\n solution[indexNameDict[position2]] = aa2\n cuts.append(solution) \n \n elif positionNum == 3:\n position1 = positions[0]\n position2 = positions[1]\n position3 = positions[2]\n if position1 < 0 or position1 > length or position2 < 0 or position2 > length or position3 < 0 or position3 > length:\n text = \" The provided residue positions are out of the molecule residues range\"\n raise DeimmunizationError(text)\n # Make sure position1 < position2 < position3\n indexs = [position1, position2, position3]\n indexs.sort()\n position1 = indexs[0]\n position2 = indexs[1]\n position3 = indexs[2]\n begin_all, end1 = determine_begin_end(molecule, position1)\n begin3, end_all = determine_begin_end(molecule, position3)\n seqs = extract_all_sequences(molecule, begin_all, end_all)\n #count = count_total_mutations(seqs, database)\n count = count_total_mutations_cpp(seqs)\n #print \"seqs: \", len(seqs)\n #print \"count: \", count\n iteration = 0\n for aa1 in aminoAcids[\"PDB\"]:\n molecule1 = molecule.duplicate()\n molecule1[position1].kind = aa1\n for aa2 in aminoAcids[\"PDB\"]:\n molecule2 = molecule1.duplicate()\n molecule2[position2].kind = aa2\n for aa3 in aminoAcids[\"PDB\"]:\n molecule3 = molecule2.duplicate()\n molecule3 = molecule2.duplicate()\n molecule3[position3].kind = aa3\n begin_mut_all, end_mut1 = determine_begin_end(molecule, position1)\n begin_mut3, end_mut_all = determine_begin_end(molecule, position3)\n seqs_mut = extract_all_sequences(molecule2, begin_mut_all, end_mut_all)\n #mutCount = count_total_mutations(seqs_mut, database)\n mutCount = count_total_mutations_cpp(seqs_mut)\n #print \"seqs_mut: \", len(seqs_mut)\n #print \"mutCount: \", mutCount\n #print iteration\n iteration += 1 \n if mutCount > count:\n solution = {}\n solution[indexNameDict[position1]] = aa1\n solution[indexNameDict[position2]] = aa2\n solution[indexNameDict[position3]] = aa3\n cuts.append(solution) \n \n return cuts", "def get_epa_species_list(self, composition_space, constraints, random):\n # get random number of formula units and resulting number of atoms\n reduced_formula = composition_space.endpoints[0].reduced_composition\n num_atoms_in_formula = reduced_formula.num_atoms\n max_num_formulas = int(math.floor(\n self.max_num_atoms/num_atoms_in_formula))\n min_num_formulas = int(math.ceil(\n constraints.min_num_atoms/num_atoms_in_formula))\n # round up the next formula unit if necessary\n if max_num_formulas < min_num_formulas:\n max_num_formulas += 1\n random_num_formulas = random.randint(min_num_formulas,\n 
max_num_formulas)\n\n # add the right number of each specie\n species = []\n for specie in reduced_formula:\n for _ in range(random_num_formulas*int(reduced_formula[specie])):\n species.append(specie)\n return species", "def gen_energies(n_muons):\r\n pdist, bounds = fit_energylaw()\r\n samples = monte_carlo_sample(pdist, bounds, n_muons)\r\n return samples", "def search_a_for_genetic(env:RailEnv,randomized):\r\n schedules = []\r\n occupancy_map=[[] for i in range(len(env.agents))]\r\n\r\n n_timesteps = np.array([])\r\n state_schedule =[]\r\n conv = StateConverter(env)\r\n # Compute the transition and valid action table\r\n model = convert_to_transition(env, conv)\r\n # Calculate the shortest dist from one state to another state\r\n shortest = all_pairs_shortest_paths(conv.num_states, model[0])\r\n random_order_agent = randomized\r\n print(random_order_agent)\r\n\r\n for i in random_order_agent:\r\n # Compute occupancy map\r\n occupancy_map[i] = compute_map(i, random_order_agent, n_timesteps, state_schedule, conv)\r\n\r\n # Compute schedule for each agent based on the occupancy map\r\n each_schedule = a_star_search(SearchEnv(env,conv,model,shortest,i).get_root_node(),occupancy_map[i])\r\n #print(each_schedule)\r\n schedules.append(each_schedule[0])\r\n state_schedule.append(each_schedule[1])\r\n n_timesteps = np.append(n_timesteps, [len(each_schedule[1])])\r\n\r\n # Combine separate actions into a list\r\n actions = combine(schedules,random_order_agent,int(np.max(n_timesteps)))\r\n\r\n return actions", "def energy_array_uniform(seqs, energetics):\n seqs = seqs.ends.append(seqs.comps)\n return energetics.uniform(\n np.repeat(seqs, seqs.shape[0], 0), np.tile(\n seqs, (seqs.shape[0], 1))).reshape((seqs.shape[0], seqs.shape[0]))", "def create_program(fe: FitnessEvaluator, max_len: int) -> str:\n\n # mut_prob = {\"<\": 0.8, \">\": 0.8, \"+\": 0.6, \"-\": 0.6, \"[\": 0.1, \"]\": 0.1}\n\n # new_population: List[Program] = []\n\n # k = 1000\n # N = 0.5 # N is top percentile for selection process\n\n converges = True\n gen_no = 0\n\n while 1:\n k = 1000 # k represents the initial population size\n gen_no = gen_no + 1\n print(gen_no)\n if gen_no == 100:\n converges = True\n gen_no = 0\n\n # generate initial random, score initial random, add to population\n if converges:\n converges = False\n population: List[Program] = []\n res = generate_random(fe, max_len, k, population)\n if res != \"\":\n # print(\"from RANDOM\")\n return res\n\n new_population: List[Program] = []\n ct = [0]\n\n while ct[0] != k:\n weights = populate_weights(k, population)\n\n population.sort(key=lambda program: program.score)\n\n selected = random.choices(population, weights=weights, k=k//2)\n selected.sort(key=lambda program: program.score)\n\n if bad_average(selected):\n k = 0\n converges = True\n gen_no = False\n break\n\n res = select(new_population, selected, fe, k//2, ct)\n if res != \"\":\n return res\n\n for i in range(k):\n population[i] = new_population[i]", "def fit_gaussian(position, energy, dx=0.005, a=2, b=1.5, db=0.01, tolerance=0.05, max_iterations=1000):\n min_energy, max_energy = min(energy), max(energy)\n x_start, x_range = min(position), max(position) - min(position)\n x_gauss = np.arange(0, x_range, dx)\n f_gauss = np.exp(-a * (x_range / 2 - x_gauss) ** 2 + b)\n delta_energy = abs(max(f_gauss) - max_energy)\n b_direction = np.sign(max_energy - max(f_gauss))\n print('E_WHAM: %.3f | E_GAUSS: %.3f | b_direction: %i' % (max_energy, max(f_gauss), b_direction))\n for i in range(max_iterations):\n b = b + 
b_direction * db\n f_gauss_trial = np.exp(-a * (x_range / 2 - x_gauss) ** 2 + b)\n delta_energy_trial = abs(max(f_gauss_trial) - max_energy)\n if delta_energy_trial < tolerance:\n f_gauss = f_gauss_trial\n print('Found b value: %.2f with dE: %.3f within tolerance in %i iterations' % (b, delta_energy, i))\n break\n elif delta_energy_trial < delta_energy:\n f_gauss = f_gauss_trial\n delta_energy = delta_energy_trial\n print('Finished fitting. %i iterations | dE: %.3f | b_final: %.2f' % (i, delta_energy, b))\n return (x_gauss + x_start, f_gauss)", "def get_all_relaxed_candidates_after_generation(self, gen):\n q = 'relaxed=1,extinct=0,generation<={0}'\n entries = self.c.select(q.format(gen))\n\n trajs = []\n for v in entries:\n t = self.get_atoms(id=v.id)\n t.info['confid'] = v.gaid\n t.info['relax_id'] = v.id\n trajs.append(t)\n trajs.sort(key=lambda x: get_raw_score(x),\n reverse=True)\n return trajs", "def generate_sequence(\n size: int,\n omissions: int,\n edge_perc: Union[int, float],\n tdef,\n max_iter: int = 500,\n on_diverge: str = \"warn\",\n) -> NDArray[int]:\n _check_type(size, (\"int\",), \"size\")\n _check_type(omissions, (\"int\",), \"omissions\")\n _check_type(edge_perc, (\"numeric\",), \"edge_perc\")\n _check_type(max_iter, (\"int\",), \"max_iter\")\n _check_value(on_diverge, (\"warn\", \"raise\"), \"on_diverge\")\n if size <= 0:\n raise ValueError(\n \"Argument 'size' must be a strictly positive integer. \"\n f\"Provided: '{size}'.\"\n )\n if omissions < 0:\n raise ValueError(\n \"Argument 'omissions' must be a strictly positive integer. \"\n f\"Provided: '{omissions}'.\"\n )\n if not (0 <= edge_perc <= 100):\n raise ValueError(\n \"Argument 'edge_perc' must be a valid percentage between 0 and \"\n f\"100. Provided {edge_perc}%.\"\n )\n if max_iter <= 0:\n raise ValueError(\n \"Argument 'max_iter' must be a strictly positive integer. 
\"\n f\"Provided: '{max_iter}'.\"\n )\n\n n_edge = math.ceil(edge_perc * size / 100)\n start = [tdef.sound] * n_edge\n\n middle = [tdef.sound] * (size - omissions - 2 * n_edge)\n middle += [tdef.omission] * omissions\n random.shuffle(middle)\n iter_ = 0\n while True:\n groups = [(n, list(group)) for n, group in groupby(middle)]\n\n if all(len(group[1]) == 1 for group in groups if group[0] == tdef.omission):\n converged = True\n break\n\n if max_iter < iter_:\n msg = \"Randomize sequence generation could not converge.\"\n if on_diverge == \"warn\":\n logger.warning(msg)\n converged = False\n else:\n raise RuntimeError(msg)\n break\n\n for i, (n, group) in enumerate(groups):\n if n == tdef.sound or len(group) == 1:\n continue\n\n # find the longest group of TRIGGERS['sound']\n idx = np.argmax([len(g) if n == tdef.sound else 0 for n, g in groups])\n pos_sound = sum(len(g) for k, (_, g) in enumerate(groups) if k < idx)\n pos_sound = pos_sound + len(groups[idx][1]) // 2 # center\n\n # find position of current group\n pos_omission = sum(len(g) for k, (_, g) in enumerate(groups) if k < i)\n\n # swap first element from omissions with center of group of sounds\n middle[pos_sound], middle[pos_omission] = (\n middle[pos_omission],\n middle[pos_sound],\n )\n\n break\n\n iter_ += 1\n\n # sanity-check\n if converged:\n assert all(len(group) == 1 for n, group in groups if n == tdef.omission)\n assert not any(\n middle[i - 1] == middle[i] == tdef.omission for i in range(1, len(middle))\n )\n\n end = [tdef.sound] * n_edge\n return np.array(start + middle + end)", "def get_beads(self):\n phos_atoms,sugar_atoms,base_atoms = [],[],[]\n\n for i,a in enumerate(self.atoms):\n if a is None:\n continue\n if i < 3:\n phos_atoms.append(a)\n elif i < 12:\n sugar_atoms.append(a)\n else:\n base_atoms.append(a)\n\n beads = []\n types = [residue.BeadType.PHOS, residue.BeadType.SUGAR, residue.BeadType.BASE]\n for i,alist in enumerate([phos_atoms,sugar_atoms,base_atoms]):\n if len(alist) > 0:\n beads.append(residue.Bead(util.center(alist), types[i]))\n\n return beads", "def gene_finder(dna):\n threshold = longest_ORF_noncoding(dna, 1500)\n list_of_aminos = []\n list_of_ORFs = find_all_ORFs_both_strands(dna)\n for an_ORF in list_of_ORFs:\n if an_ORF > threshold:\n amino_acid_ORF = coding_strand_to_AA(an_ORF)\n list_of_aminos.append(amino_acid_ORF)\n return list_of_aminos" ]
[ "0.5575298", "0.5412178", "0.54081595", "0.51966965", "0.5137365", "0.50596523", "0.50536424", "0.4986733", "0.4958774", "0.49356183", "0.4860119", "0.48584643", "0.48507887", "0.47952068", "0.47873732", "0.47784394", "0.47781447", "0.47645146", "0.4723615", "0.47215456", "0.47059876", "0.46782893", "0.46746656", "0.4663936", "0.4661615", "0.4657361", "0.46496308", "0.46388745", "0.46344012", "0.46328682" ]
0.60680664
0
Given a number of selecting limiting factors and a list of qualities of the B cells competing, selects LFnum winners based on a Boltzmann energy distribution.
def Boltzchoice(LFnum, energylist, RNs):
    # transform list to energy values in kT according to experimental
    # affinities and the energy window allowed by the threshold
    energylist = cf.y0 + np.array(energylist) * cf.m
    # calculate norm of initial list
    Norm = sum([math.exp(-ener) for ener in energylist])
    # calculate initial probability vector
    probs = np.array([math.exp(-ener) / Norm for ener in energylist])
    # list to catch indices of selected cells
    selected = []
    # cells to be picked: determined by the lesser of #waiters and #LFs
    cellpick = min(len(energylist), LFnum)
    while len(selected) < cellpick:
        bins = np.cumsum(probs)
        ind = np.digitize(RNs.getR(), bins)
        selected.append(ind)
        # now, set the probability of the selected cell to 0 and renormalise
        # the remaining probability vector
        newNorm = Norm - math.exp(-energylist[ind])
        probs[ind] = 0
        probs = probs * Norm / newNorm
        Norm = newNorm

    return selected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_best_waiters(LFnum, cellSK, GCpos, tnow, AgEpitope, mut_list, RNs):\n # determine the indices of cells to be chosen\n selinds = Boltzchoice(LFnum, [cell.affinity for cell in cellSK], RNs)\n\n # put selected cells on one list, rest on another\n select = [cellSK[i] for i in range(len(cellSK)) if i in selinds]\n rest = [cellSK[i] for i in range(len(cellSK)) if i not in selinds]\n\n # divide the selected cells once to have survivors of first division round\n # only, then choose fate for surviving daughters: another division or\n # differeniation. since we are dividing all cells on the list here and they\n # are not added to the waitlist again, pass empty waitlist.\n selected_daughters, mut_list = cell_division([], select, AgEpitope, tnow,\n mut_list, RNs)\n\n # for these viable daughters, decide how many to divide again and how many\n # to differentiate according to the recycle frequency\n div = np.random.binomial(len(selected_daughters), cf.recycle)\n diff = len(selected_daughters) - div\n # mix daughters (twice, don't trust this function so much)\n random.shuffle(selected_daughters)\n random.shuffle(selected_daughters)\n # make events if count > 0\n new_events = []\n if div > 0:\n event_div = (tnow + cf.thelp + 2*cf.tdiv, 'Divide', GCpos,\n selected_daughters[:div])\n new_events.append(event_div)\n if diff > 0:\n # get number of cells that will become memory cells, ignore rest (PCs)\n memdiff = np.random.binomial(diff, (1 - cf.PCexport))\n event_diff = (tnow + cf.thelp + cf.tdiv + cf.tdiff, 'Differentiate',\n GCpos, selected_daughters[div:div + memdiff])\n new_events.append(event_diff)\n\n return rest, new_events, mut_list", "def select_best_chanels():\r\n \r\n \r\n all_paths = [['data_bci\\\\row_data\\\\subject1\\\\'], ['data_bci\\\\row_data\\\\subject2\\\\'],['data_bci\\\\row_data\\\\subject3\\\\']]\r\n\r\n train_subjects = ['01']\r\n test_subject = '02'\r\n freq = 512\r\n\r\n cutoff_beggining = 0\r\n columns_to_read = ['Fp1', 'AF3' ,'F7', 'F3', 'FC1', 'FC5', 'T7', 'C3', 'CP1', 'CP5',\r\n 'P7', 'P3', 'Pz', 'PO3', 'O1', 'Oz', 'O2', 'PO4', 'P4', 'P8', 'CP6',\r\n 'CP2', 'C4', 'T8', 'FC6', 'FC2', 'F4', 'F8', 'AF4', 'Fp2', 'Fz', 'Cz','class']\r\n seq_len = 0\r\n cut_step = 0\r\n num_perseg = freq\r\n num_overlap = int(num_perseg/2)\r\n min_freq=8\r\n max_freq=45\r\n \r\n chanels_rank = rank_chanels()\r\n \r\n result = []\r\n for i in range(1, len(chanels_rank)):\r\n intermidiate_result = []\r\n for path in all_paths:\r\n train_full_data, train_full_data_filtered, train_full_anots, test_full_data, test_full_filtered, test_full_annoations = read_filter(path, train_subjects,test_subject, columns_to_read, cutoff_beggining, seq_len, cut_step)\r\n\r\n train_psd_signals = eval_psd_not_modulated(train_full_data, num_perseg, num_overlap, freq, min_freq, max_freq)\r\n test_psd_signals = eval_psd_not_modulated(test_full_data, num_perseg, num_overlap, freq, min_freq, max_freq) \r\n\r\n train_psd_signals = flatten_data(train_psd_signals[:,:,chanels_rank[:i]])\r\n test_psd_signals = flatten_data(test_psd_signals[:,:,chanels_rank[:i]])\r\n \r\n acc = evalute_subset(train_psd_signals, test_psd_signals, train_full_anots, test_full_annoations)\r\n intermidiate_result.append(acc)\r\n \r\n result.append(intermidiate_result)\r\n #mean_subject_acc = np.array([sum(humans_acc)/len(humans_acc) for humans_acc in result])\r\n #best_idx = np.argmax(mean_subject_acc)\r\n\r\n return result, chanels_rank", "def __init__(\n self, data: pd.DataFrame,\n n_picked: int, n_choices: int,\n vmin: int, vmax: int, 
n_train: int, hits_to_win: tuple\n ):\n self.data = data\n self.n_picked = n_picked\n self.n_choices = n_choices\n self.vmin = vmin\n self.vmax = vmax\n self.n_train = n_train\n self.hits_to_win = hits_to_win\n \n self.k_results = list(range(n_picked))\n self.run()", "def compute_strategies_given_max_winning_bid(max_winning_bid, bidders):\r\n bid_start_points = [[-1.0] * len(bidder.values) for bidder in bidders]\r\n bid_end_points = [[-1.0] * len(bidder.values) for bidder in bidders]\r\n F_jump_points = [[(max_winning_bid, 1.0)] for _ in range(len(bidders))]\r\n\r\n # current state\r\n is_active = [0] * len(bidders)\r\n remaining_prob = [-1.0] * len(bidders)\r\n cur_bid = max_winning_bid\r\n cur_value_idx = [len(bidder.values) - 1 for bidder in bidders]\r\n\r\n def cur_value(bidder_idx):\r\n if cur_value_idx[bidder_idx] >= 0:\r\n return bidders[bidder_idx].values[cur_value_idx[bidder_idx]]\r\n else:\r\n return None\r\n\r\n def next_candidate():\r\n \"\"\"the next bidder to enter the bidding set\"\"\"\r\n candidate_bidder = -1\r\n candidate_value = -1\r\n for n in range(len(bidders)):\r\n if (is_active[n] == 0 and cur_value(n) is not None\r\n and cur_value(n) > max(candidate_value, cur_bid)):\r\n candidate_value = bidders[n].values[cur_value_idx[n]]\r\n candidate_bidder = n\r\n return candidate_value, candidate_bidder\r\n\r\n while True:\r\n # compute bidding set\r\n max_inactive_value, entering_bidder = next_candidate()\r\n while entering_bidder >= 0:\r\n active_values = [cur_value(j) for j in range(len(bidders)) if is_active[j] == 1]\r\n if ((sum(is_active) < 2 or\r\n h(cur_bid, cur_value(entering_bidder), active_values) >= 0) and\r\n not max_inactive_value > cur_bid > max_inactive_value - 1e-8):\r\n is_active[entering_bidder] = 1\r\n bid_start_points[entering_bidder][cur_value_idx[entering_bidder]] = cur_bid\r\n remaining_prob[entering_bidder] = bidders[entering_bidder].prob[cur_value_idx[entering_bidder]]\r\n max_inactive_value, entering_bidder = next_candidate()\r\n else:\r\n break\r\n\r\n # terminates computation\r\n if sum(is_active) < 2:\r\n for i in range(len(bidders)):\r\n if is_active[i] == 1:\r\n bid_end_points[i][cur_value_idx[i]] = cur_bid\r\n break\r\n\r\n # compute next change point\r\n exiting_criteria = H\r\n exp_exiting_criteria = exp_H\r\n change_points = [-1e8] * len(bidders)\r\n active_values = [cur_value(j) for j in range(len(bidders)) if is_active[j] == 1 and cur_value(j) is not None]\r\n for i in range(len(bidders)):\r\n if cur_value(i) is None:\r\n continue\r\n if is_active[i] == 0:\r\n try:\r\n change_points[i] = optimize.brentq(\r\n lambda x: h(x, cur_value(i), active_values, poly_form=True),\r\n -1e8,\r\n cur_bid)\r\n except ValueError:\r\n change_points[i] = -1e8\r\n else:\r\n if sum(bidders[i].prob[:cur_value_idx[i]]) == 0:\r\n change_points[i] = -1e8\r\n else:\r\n try:\r\n change_points[i] = optimize.brentq(\r\n lambda x: (exiting_criteria(cur_bid, cur_value(i), active_values) -\r\n exiting_criteria(x, cur_value(i), active_values) -\r\n (np.log(sum(bidders[i].prob[:cur_value_idx[i]]) + remaining_prob[i]) -\r\n np.log(sum(bidders[i].prob[:cur_value_idx[i]])))),\r\n -1e8,\r\n cur_bid)\r\n except ValueError:\r\n change_points[i] = -1e8\r\n\r\n # update state\r\n next_change = max(change_points)\r\n changing_bidder = np.argmax(change_points)\r\n for i in range(len(bidders)):\r\n if i == changing_bidder:\r\n continue\r\n if is_active[i] == 1:\r\n remaining_prob[i] = ((sum(bidders[i].prob[:cur_value_idx[i]]) + remaining_prob[i]) /\r\n exp_exiting_criteria(cur_bid, 
cur_value(i), active_values) *\r\n exp_exiting_criteria(next_change, cur_value(i), active_values) -\r\n sum(bidders[i].prob[:cur_value_idx[i]]))\r\n if np.abs(remaining_prob[i]) <= 1e-8:\r\n F_jump_points[i].append((next_change, sum(bidders[i].prob[:cur_value_idx[i]])))\r\n is_active[i] = 0\r\n remaining_prob[i] = -1.0\r\n bid_end_points[i][cur_value_idx[i]] = next_change\r\n cur_value_idx[i] -= 1\r\n else:\r\n F_jump_points[i].append((next_change, sum(bidders[i].prob[:cur_value_idx[i]]) + remaining_prob[i]))\r\n else:\r\n F_jump_points[i].append((next_change, sum(bidders[i].prob[:cur_value_idx[i] + 1])))\r\n if is_active[changing_bidder] == 0: # entering the bidding set\r\n is_active[changing_bidder] = 1\r\n remaining_prob[changing_bidder] = bidders[entering_bidder].prob[cur_value_idx[entering_bidder]]\r\n bid_start_points[changing_bidder][cur_value_idx[changing_bidder]] = next_change\r\n F_jump_points[changing_bidder].append(\r\n (next_change, sum(bidders[changing_bidder].prob[:cur_value_idx[changing_bidder] + 1])))\r\n else: # exiting the bidding set\r\n is_active[changing_bidder] = 0\r\n remaining_prob[changing_bidder] = -1.0\r\n bid_end_points[changing_bidder][cur_value_idx[changing_bidder]] = next_change\r\n cur_value_idx[changing_bidder] -= 1\r\n F_jump_points[changing_bidder].append(\r\n (next_change, sum(bidders[changing_bidder].prob[:cur_value_idx[changing_bidder] + 1])))\r\n cur_bid = next_change\r\n if cur_bid <= 0.0:\r\n break\r\n solution_state = State(is_active=is_active,\r\n remaining_prob=remaining_prob,\r\n cur_bid=cur_bid,\r\n cur_value_idx=cur_value_idx)\r\n\r\n return bid_start_points, bid_end_points, F_jump_points, solution_state", "def get_winners():\r\n\r\n numbers = []\r\n\r\n # Randomly get 5 winning numbers\r\n for i in range(0, 5):\r\n if not numbers:\r\n number = random.randrange(1, 71)\r\n while number > 25:\r\n number = random.randrange(1, 71)\r\n else:\r\n numbers.append(number)\r\n else:\r\n numbers.append(random.randrange(numbers[i - 1], 71))\r\n\r\n # Randomly get multiplying ball\r\n numbers.append(random.randrange(1, 25))\r\n\r\n return numbers", "def optimal_agent(bandit, iterations):\n\n for i in range(iterations):\n a = bandit.pay_offs.index(max(bandit.pay_offs))\n r = bandit.sample(a)\n yield a, r", "def fit_weibull_models(distribution_values, tailsizes, num_max_fits=50):\n\n weibull_models = []\n\n # loop through the list containing distance values per class\n for i in range(len(distribution_values)):\n # for each class set the initial success to False and number of attempts to 0\n is_valid = False\n count = 0\n\n # If the list contains distance values conduct a fit. If it is empty, e.g. because there is not a single\n # prediction for the corresponding class, continue with the next class. 
Note that the latter isn't expected for\n # a model that has been trained for even just a short while.\n if isinstance(distribution_values[i], torch.Tensor):\n distribution_values[i] = distribution_values[i].cpu().numpy()\n # weibull model per class\n weibull_models.append(libmr.MR())\n # attempt num_max_fits many fits before aborting\n while is_valid is False and count < num_max_fits:\n # conduct the fit with libmr\n weibull_models[i].fit_high(distribution_values[i], tailsizes[i])\n is_valid = weibull_models[i].is_valid\n count += 1\n if not is_valid:\n print(\"Weibull fit for class \" + str(i) + \" not successful after \" + str(num_max_fits) + \" attempts\")\n return weibull_models, False\n else:\n weibull_models.append([])\n\n return weibull_models, True", "def select_cars(\n\tdf_merged: pd.DataFrame,\n\tdf_prediction: pd.DataFrame,\n\tdemand_vars: list = [\n\t\t\t\t\t'MAKE',\n\t\t\t\t\t'FUELTYPE',\n\t\t\t\t\t'GEARTYPE',\n\t\t\t\t\t'VEHICLEKIND'],\n\tdamage_vars: list = [\n\t\t\t\t\t'DAMAGE_CURRENT',\n\t\t\t\t\t'DAMAGE_MAX',\n\t\t\t\t\t'DAMAGE_COUNT',\n\t\t\t\t\t'DAMAGE_CATEGORY',\n\t\t\t\t\t'DAMAGE_LARGE_CATEGORY'],\n\tN: int = 50\n):\n\tlogger.info(\"Selecting optimal %d cars to sell BSC\" % N)\n\n\t# split datasets\n\tdf_b2c = df_merged[df_merged['SALES_CHANNEL']=='B2C'].reset_index()\n\tdf_target = df_merged[df_merged['SALES_CHANNEL']=='TO BE DETERMINED'].reset_index()\n\n\t# calculate demand rank\n\tdf_target['DEMAND_RANK'] = _demand_rank(df_b2c, df_target, demand_vars)\n\n\t# calculate damage rank\n\tdf_target['DAMAGE_RANK'] = _damage_rank(df_target, damage_vars)\n\n\t# calculate final rank from average between demand and damage ranks\n\tdf_target['RANK'] = df_target[['DEMAND_RANK','DAMAGE_RANK']]\\\n\t\t\t\t\t\t\t\t\t.mean(axis=1).rank(pct=True)\n\n\t# sort by rank and get first N rows\n\tdf_selection = df_target.sort_values('RANK', ascending=False).head(N)\n\n\t# adding sell price predictions\n\tdf_selection = pd.merge(df_selection.drop('SELLPRICE_CAR', axis=1),\n df_prediction[['SELLPRICE_CAR']].reset_index(), how='left')\n\n\treturn df_selection", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n # minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter", "def choose_split(data,treshold):\n n_features = len(data[0]) - 1 # number of columns\n quest_gain = [] # keep track of the gains and questions\n\n for col in range(1,n_features): # for each feature\n values = set([row[col] for row in data]) # unique values in the 
column\n for val in values: # for each value\n question = Question(col, val)\n \n # try splitting the dataset\n true_rows, false_rows = partition(data, question)\n\n # Skip this split if it doesn't divide the dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(data, true_rows, false_rows)\n quest_gain.append(Question_gain(gain,question))\n\n possible_question = [] # possible questions to ask\n n_quest_gain = len(quest_gain)\n\n if n_quest_gain == 0:\n return float('Inf'), float('NaN') #\n\n for x in range(n_quest_gain):\n if (quest_gain[x].gain >= treshold):\n possible_question.append(Question_gain(quest_gain[x].gain,quest_gain[x].question))\n \n n_possible_question = len(possible_question)\n if n_possible_question == 0:\n return float('Inf'), float('NaN')\n\n if n_possible_question>=2:\n [i, j] = random.sample(range(0, n_possible_question), 2)\n else:\n i = j = random.randint(0,n_possible_question-1)\n\n if possible_question[i].gain>=possible_question[j].gain:\n return possible_question[i].gain, possible_question[i].question\n else:\n return possible_question[j].gain, possible_question[j].question", "def Bayes5DStats(numIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n problemBounds = {\"Bfield\": choco.uniform(10, 1300), \"T\": choco.uniform(50, 230), \"Btheta\": choco.uniform(0, 90), \"Etheta\": choco.uniform(0, 90), \"Bphi\": choco.uniform(0, 90)}\n\n # Set up the database for the chocolate optimiser.\n connection = choco.SQLiteConnection(\"sqlite:///bayes_5D_\" + str(rank) + \"_db.db\")\n\n if rank == 0:\n timeList = []\n bestFoMList = []\n\n # Define which solver will be used.\n solver = choco.Bayes(connection, problemBounds, utility_function = \"ei\", n_bootstrap = int(np.ceil(numIters/10)), clear_db = True)\n\n # Clear the database. TODO: To do this?\n connection.clear()\n\n # Start timing.\n startTime = time.time()\n bestFoM = 0\n\n # Start optimisation.\n for iteration in range(numIters):\n\n # Make one suggestion.\n try:\n token, nextParams = solver.next()\n except:\n print(\"Error suggesting a new point. Here are the last set of parameters sampled, and it's returned value:\")\n print(str(nextParams))\n print(\"Iteration number: \" + str(iteration))\n continue\n\n # Check what FoM this gives. Go negative as this is a minimisation routine.\n fEval = abs(Fitness5D(**nextParams))\n\n # Update best FoM.\n if fEval > bestFoM:\n bestFoM = fEval\n \n # Tell the optimiser about the result.\n solver.update(token, fEval)\n\n # One run complete.\n timeElapsed = time.time() - startTime\n # Run complete. Send results to main process. 
Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(bestFoM, dest = 0, tag = 2)\n \n # Wait for all the processes to end.\n comm.Barrier()\n \n if rank == 0:\n # Add own data first.\n bestFoMList.append(bestFoM)\n timeList.append(timeElapsed)\n\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualFoM = None\n individualFoM = comm.recv(individualFoM, source = process + 1, tag = 2)\n\n bestFoMList.append(individualFoM)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgFoM = np.average(bestFoMList)\n avgFoMPerTime = np.average(np.divide(bestFoMList, timeList))\n avgFoMPerIter = np.average(np.divide(bestFoMList, numIters))\n absBestFoM = np.max(bestFoMList)\n\n print(\"Bayesian optimisation 5D testing complete! Here are the stats:\")\n print(\"Average runtime per run (s): \" + str(avgRuntime))\n print(\"Average FoM: \" + str(avgFoM))\n print(\"Average FoM per unit time: \" + str(avgFoMPerTime))\n print(\"Average FoM per unit iteration: \" + str(avgFoMPerIter))\n print(\"Absolute best FoM determined: \" + str(absBestFoM))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def cutoff_frequencies(n_e, B, gamma=1.):\n m_e = gamma * cgs.me\n m_i = cgs.mp\n n_i = n_e\n\n om_pe = omega_plasma(n_e, m_e)\n om_pi = omega_plasma(n_i, m_i)\n om_ce = omega_cyclotron(-1, B, m_e)\n om_ci = omega_cyclotron(+1, B, m_i)\n\n cutoffs = [np.sqrt(om_pe**2 + om_pi**2)] # P = 0 cutoff is trivial.\n\n A = 1.\n B = -(om_ce + om_ci) # this is the L = 0 cutoff; we're destroying the magnetic field variable\n C = om_ce * om_ci - om_pe**2 - om_pi**2\n\n if 4 * A * C > B**2:\n return np.array(cutoffs) # no other valid solutions.\n\n # Between R and L and the +/- in the quadratic equations, there are four\n # possible solutions, two of which are negations of the other, so there\n # are always two nonnegative solutions. If RHS == 0 they're the same\n # number, though. `B` and `RHS` as we've defined them are always\n # nonnegative.\n\n prefactor = 1. / (2 * A)\n rhs = np.sqrt(B**2 - 4 * A * C)\n\n cutoffs.append(prefactor * (B + rhs))\n\n if rhs != 0.:\n if rhs > B:\n cutoffs.append(prefactor * (rhs - B))\n else:\n cutoffs.append(prefactor * (B - rhs))\n\n return np.array(sorted(cutoffs))", "def _compute_bn(self, lvl):\n bn = [0] # number of samples crossing the left/right boundary\n for n in range(lvl):\n # 1. down-sampling of N samples by the factor scl gives (N-1)//scl + 1 samples\n # 2. 
bn[-1]+M-1 is the number of samples acrossing the left/right boundary, with M being the number of freqeuncies\n # => hence after the downsampling the number of boundary crossing samples is:\n bn.append((bn[-1]+self.nfreq-2)//self.scaling+1)\n bn.append(bn[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return bn[1:][::-1]", "def get_low_binder(RNs, AgEpitope, ntest):\n E_collect = []\n while len(E_collect) < ntest:\n ab = Ab_seq(RNs)\n Emax = E_best(ab, AgEpitope)\n if Emax >= cf.thr:\n E_collect.append(Emax)\n return min(E_collect)", "def roulette_wheel_selection(fitness, n):\n\n # calculate standard propabilites in regard to fitness scores\n sum_of_fitness = np.sum(fitness)\n\n # since smaller is better, inverse it\n probabilities = [fit/sum_of_fitness for fit in fitness]\n\n # build cummulative probabilites\n cum_propabilites = [sum(probabilities[:i]) for i in range(1, len(probabilities)+1)]\n\n # list of indexes of selected members\n indx_list = []\n\n while len(indx_list) != n:\n\n # generate random number pepresenting the ball in the roulette\n r = random.uniform(0, 1)\n\n for indx, prob in enumerate(cum_propabilites):\n # we found the place the ball fell down\n if r <= prob:\n indx_list.append(indx)\n break\n\n return indx_list", "def choose_features(nof_features, max_features):\n features_indices = numpy.arange(nof_features)\n #numpy.random.seed()\n #features_chosen = numpy.random.choice(features_indices, size=max_features, replace = True)\n features_chosen = numpy.random.choice(features_indices,\n size=nof_features,\n replace=False)\n\n #print(features_chosen)\n return features_chosen", "def get_W_L_sets(vote_count, n_winners):\n tuples = list(vote_count.items())\n sorted_tuples = sorted(tuples, key=operator.itemgetter(1), reverse=True)\n W = [c[0] for c in sorted_tuples[:n_winners]]\n L = [c[0] for c in sorted_tuples[n_winners:]]\n return W, L", "def solve(self):\n\n n = 0\n\n # Keep iterating while incrementing the allowed combination length until\n # a combination that can accommodate the defined load has been found.\n while True:\n n += 1\n combos = self.fit_n_bins(n=n)\n if combos:\n return combos", "def play_beergame(self, ntrials=1000, get_output=True):\n pdata = np.zeros((ntrials+1, self.nact))\n pdata[0, :] = np.array([1/self.nact]*self.nact)\n qdata = np.zeros_like(pdata)\n self.choices = []\n self.feedback = []\n\n for t in range(ntrials):\n\n # select bandit arm (action) from state space \n act_i = np.random.choice(self.actions, p=pdata[t, :])\n \n # get reward for current action \n r = self.beergame.get_reward(act_i)\n \n if t>0: \n # update value of selected action\n qdata[t+1, act_i] = update_Qi(qdata[t-1, self.last], qdata[t, act_i], r, self.alpha, self.gamma)\n \n # broadcast old q-values for unchosen actions\n for act_j in range(self.nact):\n if act_j == act_i: continue \n qdata[t+1, act_j] = qdata[t, act_j]\n \n self.last = act_i\n # update action selection probabilities and store data\n pdata[t+1, :] = update_Pall(qdata[t+1, :], self.beta)\n self.choices.append(act_i)\n self.feedback.append(r)\n\n self.pdata = pdata[1:, :]\n self.qdata = qdata[1:, :]\n self.make_output_df()\n\n if get_output:\n return self.data.copy()", "def driver() :\n\t\n\t#The lists for the first name, last name and the favorite number for all employees.\n\tfname_list = [] \n\tlname_list = []\n\tfavorite_number = []\n\n\t#The list used in calculating the frequency of the numbers at a specific slot. 
\n\tfreq = [[] for _ in xrange(6)]\n\n\t#The result list containing the Powerball winning number.\n\tres=[]\n\tinput_choice=\"\"\n\n\t# Taking the user inputs until N or n is entered as input.\n\twhile 1 : \n\t\tinput_choice = raw_input(\"Enter employee info? [Y/N] \") \n\n\t\t# if the user inputs lowercase y or n it would still work.\n\t\tif input_choice in ['y','Y']:\n\n\t\t\tdata_update(fname_list,lname_list,favorite_number,freq)\n\n\t\telif input_choice in ['n','N'] :\n\t\t\t\n\t\t\tbreak\n\n\t\telse :\n\t\t\tprint \"Invalid Choice\"\n\t\t\tcontinue\t\t\n\n\tn_employees = len(fname_list)\n\tcounter = 0\n\tprint \"\\n\\n\"\n\n\t#Printing the user names and their favorite numbers to stdout.\n\twhile counter < n_employees :\n\t\tprint fname_list[counter] + \" \" + lname_list[counter] + \" \" + \" \".join(map(str,favorite_number[counter][:-1])) + \" Powerball: \" + str(favorite_number[counter][5])\n\t\tcounter += 1\n\n\tprint \" \\n\\n \"\n\n\t#If No employee info was entered.\n\tif n_employees==0:\n\t\tprint \"No Employee Found\"\n\n\t#Calculating the numbers with max frequency in each slot. If not unique, a random number would be used.\n\telse : \n\t\titr = 0\n\t\twhile itr < 6 :\n\t\t\tcount=Counter(freq[itr])\n\n\t\t\t#There is just one number to choose from in this slot. \n\t\t\tif len(count)==1 :\n\t\t\t\tres.append(count.most_common()[0][0])\n\n\t\t\t#There is no unique number with max frequency.\t\n\t\t\telif count.most_common()[0][1] == count.most_common()[1][1] :\n\t\t\t\tif itr < 5 :\n\t\t\t\t\tres.append(random.randint(1,69))\n\t\t\t\telse :\n\t\t\t\t\tres.append(random.randint(1,26))\n\n\t\t\t#The number with max frequency is unique. \t\n\t\t\telse :\n\t\t\t\tres.append(count.most_common()[0][0])\n\n\t\t\titr += 1\n\n\t\t#Printing out the winning Powerball number.\n\t\tprint \"Powerball winning number:\\n\"\n\t\tprint \" \".join(map(str,res[:-1])) + \" Powerball: \" + str(res[5])\n\t\n\treturn", "def get_n_best(self):\n pass", "def ring_winners(b, players):\n winners = []\n winrank = ''\n s = [evaluator.evaluate(b, p) for p in players]\n for i, rank in enumerate(s):\n if rank == min(s):\n winners.append(i)\n winrank = evaluator.class_to_string(evaluator.get_rank_class(rank))\n return [winners, winrank]", "def nb_feature_select(self,estimator, X, y,cv_kfold=5):\n\n try:\n selector = RFECV(estimator, step=1,cv=cv_kfold, min_features_to_select=round((len(X.columns)/2)))\n selector = selector.fit(X,y)\n support = selector.support_\n selected = []\n for a, s in zip(X.columns, support):\n if(s):\n selected.append(a)\n return selected\n except Exception as e:\n print(e)", "def find_best_k(data, anots, neibhours_range):\r\n \r\n best_k = 0\r\n best_acc = 0\r\n for n_neighbors in neibhours_range:\r\n accur = iterate_over_chanels(data, anots, n_neighbors)\r\n mean_acc = accur.mean()\r\n if mean_acc > best_acc:\r\n best_acc = mean_acc\r\n best_k = n_neighbors\r\n return best_k", "def determine_offer_tuple_NN(nn, ti, ca, eps, eps_ran, revenues, A, arrival_probabilities, preference_weights, no_purchase_preference):\n\n\n # opportunity costs\n opp_costs = 1.0*np.zeros_like(revenues)\n for pro in products:\n if functools.reduce(operator.or_, ca - A[:, pro] < 0):\n # set opp costs to infty if not enough capacity for product\n opp_costs[pro] = np.inf\n else:\n # calculate opportunity costs via Bellman: V(t+1, c) - V(t+1, c-A[i])\n t_df = pd.DataFrame([np.zeros(T + 1)] * 1)\n t_df.columns = [\"t\" + str(i) for i in t_df.columns]\n t_df.iloc[0, ti+1] = 1\n\n cs_unsold = {}\n for h in 
resources:\n c_df = pd.DataFrame([np.zeros(T +1)] * 1)\n c_df.columns = [\"c-h\" + str(h) + \"-t\" + str(i) for i in c_df.columns]\n c_df.iloc[0, ti+1] = ca[h]\n cs_unsold[h] = c_df\n\n cs_sold = {}\n for h in resources:\n c_df = pd.DataFrame([np.zeros(T + 1)] * 1)\n c_df.columns = [\"c-h\" + str(h) + \"-t\" + str(i) for i in c_df.columns]\n c_df.iloc[0, ti + 1] = ca[h] - A[h, pro]\n cs_sold[h] = c_df\n\n X_unsold = pd.concat([t_df, *[cs_unsold[h] for h in resources]], axis=1)\n X_sold = pd.concat([t_df, *[cs_sold[h] for h in resources]], axis=1)\n\n opp_costs[pro] = nn.predict(X_unsold) - nn.predict(X_sold)\n\n # epsilon greedy strategy - offer no products\n if eps_ran < eps / 2:\n return tuple(np.zeros_like(revenues))\n\n # epsilon greedy strategy - offer all products\n if eps_ran < eps:\n offer_tuple = np.ones_like(revenues)\n offer_tuple[opp_costs == np.inf] = 0 # one resource not available => don't offer product\n return tuple(offer_tuple)\n\n # setup\n offer_tuple = np.zeros_like(revenues)\n\n # line 1\n s_prime = revenues - opp_costs > 0\n if all(np.invert(s_prime)):\n return tuple(offer_tuple)\n\n # line 2-3\n # offer_sets_to_test has in each row an offer set, we want to test\n offer_sets_to_test = np.zeros((sum(s_prime), len(revenues)))\n offer_sets_to_test[np.arange(sum(s_prime)), np.where(s_prime)] = 1\n offer_sets_to_test += offer_tuple\n offer_sets_to_test = (offer_sets_to_test > 0)\n\n value_marginal = np.apply_along_axis(calc_value_marginal_nn, 1, offer_sets_to_test, opp_costs, revenues,\n arrival_probabilities, preference_weights, no_purchase_preference)\n\n offer_tuple = offer_sets_to_test[np.argmax(value_marginal)]*1\n s_prime = s_prime & offer_tuple == 0\n v_s = np.amax(value_marginal)\n\n # line 4\n while True:\n # 4a\n # offer_sets_to_test has in each row an offer set, we want to test\n offer_sets_to_test = np.zeros((sum(s_prime), len(revenues)))\n offer_sets_to_test[np.arange(sum(s_prime)), np.where(s_prime)] = 1\n offer_sets_to_test += offer_tuple\n offer_sets_to_test = (offer_sets_to_test > 0)\n\n # 4b\n value_marginal = np.apply_along_axis(calc_value_marginal_nn, 1, offer_sets_to_test, opp_costs, revenues,\n arrival_probabilities, preference_weights, no_purchase_preference)\n\n if np.amax(value_marginal) >= v_s:\n v_s = np.amax(value_marginal)\n offer_tuple = offer_sets_to_test[np.argmax(value_marginal)]*1 # to get 1 for product offered\n s_prime = (s_prime - offer_tuple) == 1 # only those products remain, that are neither in the offer_tuple\n if all(offer_tuple == 1):\n break\n else:\n break\n return tuple(offer_tuple)", "def choose_features(stocks, init_param, C, gamma):\r\n \r\n chosen_features = []\r\n available_features = init_param.features[:]\r\n \"\"\"The code is written to edit init_param.features but make a copy to \r\n restore things after the loop\"\"\"\r\n init_param_features = init_param.features[:]\r\n aoc = []\r\n \r\n while (len(available_features) > 5):\r\n best_aoc = 0\r\n for feature in available_features:\r\n input_features = chosen_features[:]\r\n input_features.append(feature)\r\n init_param.features = input_features\r\n feature_aoc = examine(stocks, init_param, C, gamma, False)\r\n if feature_aoc > best_aoc:\r\n best_aoc = feature_aoc\r\n best_feature = feature\r\n \r\n chosen_features.append(best_feature)\r\n available_features.remove(best_feature)\r\n aoc.append(best_aoc)\r\n \r\n \"\"\" Restore init_param.features \"\"\"\r\n init_param.features = init_param_features[:]\r\n return chosen_features, available_features, aoc", "def 
ElectronIsEMH4l2011SelectorConfig(theTool):\n\n theTool = GetTool(theTool)\n\n # the eta ranges\n theTool.CutBinEta += [0.1, 0.6, 0.8, 1.15,\n 1.37, 1.52, 1.81, 2.01, 2.37, 2.47]\n\n # range of ET bins for e-ID\n theTool.CutBinET += [\n 5.0*GeV, 10.0*GeV, 15.0*GeV, 20.0*GeV,\n 30.0*GeV, 40.0*GeV, 50.0*GeV, 60.0*GeV,\n 70.0*GeV, 80.0*GeV]\n # cut on fraction of energy deposited in 1st sampling\n theTool.CutF1 += [0.005]\n\n # cut on hadronic energy\n theTool.CutHadLeakage += [\n 0.031, 0.031, 0.021, 0.021, 0.019, 0.028, 0.065, 0.065, 0.046, 0.034, # < 5\n 0.018, 0.018, 0.016, 0.015, 0.016, 0.028, 0.053, 0.038, 0.028, 0.025, # 5-10\n 0.018, 0.018, 0.018, 0.020, 0.016, 0.033, 0.036, 0.033, 0.024, 0.025, # 10-15\n 0.015, 0.015, 0.015, 0.016, 0.014, 0.029, 0.033, 0.022, 0.019, 0.018, # 15-20\n 0.012, 0.012, 0.012, 0.012, 0.012, 0.015, 0.030, 0.022, 0.016, 0.016, # 20-30\n 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.021, 0.021, 0.015, 0.015, # 30-40\n 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.015, 0.015, 0.010, 0.010, # 40-50\n 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.015, 0.015, 0.010, 0.010, # 50-60\n 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.015, 0.015, 0.010, 0.010, # 60-70\n 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.015, 0.015, 0.010, 0.010, # 70-80\n 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.015, 0.015, 0.010, 0.010, # >80\n ]\n\n # cut on ratio e237/e277\n theTool.CutReta37 += [\n 0.700, 0.700, 0.700, 0.700, 0.700, 0.690, 0.848, 0.876, 0.870, 0.888, # < 5\n 0.700, 0.700, 0.700, 0.700, 0.700, 0.715, 0.860, 0.880, 0.880, 0.880, # 5-10\n 0.875, 0.875, 0.875, 0.875, 0.875, 0.740, 0.860, 0.875, 0.870, 0.870, # 10-15\n 0.900, 0.900, 0.895, 0.895, 0.890, 0.740, 0.880, 0.900, 0.880, 0.880, # 15-20\n 0.910, 0.910, 0.910, 0.910, 0.910, 0.750, 0.890, 0.900, 0.890, 0.890, # 20-30\n 0.920, 0.920, 0.920, 0.915, 0.915, 0.790, 0.895, 0.915, 0.895, 0.890, # 30-40\n 0.920, 0.920, 0.920, 0.915, 0.915, 0.790, 0.895, 0.915, 0.895, 0.890, # 40-50\n 0.920, 0.920, 0.920, 0.915, 0.915, 0.790, 0.895, 0.915, 0.895, 0.890, # 50-60\n 0.920, 0.920, 0.920, 0.915, 0.915, 0.790, 0.895, 0.915, 0.895, 0.890, # 60-70\n 0.920, 0.920, 0.920, 0.915, 0.915, 0.790, 0.895, 0.915, 0.895, 0.890, # 70-80\n 0.920, 0.920, 0.920, 0.915, 0.915, 0.790, 0.895, 0.915, 0.895, 0.890, # >80\n ]\n\n # cut on shower width in 2nd sampling\n theTool.CutWeta2c += [\n 0.014, 0.014, 0.014, 0.014, 0.014, 0.028, 0.017, 0.014, 0.014, 0.014, # <5\n 0.014, 0.014, 0.014, 0.014, 0.014, 0.026, 0.017, 0.014, 0.014, 0.014, # 5-10\n 0.014, 0.014, 0.015, 0.016, 0.017, 0.025, 0.017, 0.015, 0.015, 0.015, # 10-15\n 0.013, 0.013, 0.015, 0.016, 0.017, 0.025, 0.017, 0.015, 0.015, 0.014, # 15-20\n 0.013, 0.013, 0.014, 0.015, 0.015, 0.025, 0.016, 0.015, 0.015, 0.014, # 20-30\n 0.012, 0.012, 0.013, 0.013, 0.013, 0.025, 0.015, 0.014, 0.014, 0.013, # 30-40\n 0.011, 0.011, 0.012, 0.013, 0.013, 0.025, 0.015, 0.014, 0.014, 0.013, # 40-50\n 0.011, 0.011, 0.012, 0.013, 0.013, 0.025, 0.015, 0.014, 0.014, 0.013, # 50-60\n 0.011, 0.011, 0.012, 0.013, 0.013, 0.025, 0.015, 0.014, 0.014, 0.013, # 60-70\n 0.011, 0.011, 0.012, 0.013, 0.013, 0.025, 0.015, 0.014, 0.014, 0.013, # 70-80\n 0.011, 0.011, 0.012, 0.013, 0.013, 0.025, 0.015, 0.014, 0.014, 0.013] # 80<\n\n # cut on total width in 1st sampling\n theTool.CutWtot += [\n 9999., 9999., 9999., 9999., 9999., 9999., 9999., 9999., 9999., 9999., # < 5\n 9999., 9999., 9999., 9999., 9999., 9999., 9999., 9999., 9999., 9999., # 5-10\n 3.20, 3.20, 3.20, 3.85, 3.85, 9999., 3.80, 3.00, 2.00, 9999., # 10-15\n 3.00, 3.00, 3.00, 3.75, 
3.75, 9999., 3.80, 3.00, 2.00, 9999., # 15-20\n 2.90, 2.90, 2.90, 3.50, 3.50, 9999., 3.80, 3.00, 2.00, 9999., # 20-30\n 2.80, 2.80, 2.80, 3.30, 3.40, 9999., 3.70, 3.00, 1.70, 9999., # 30-40\n 2.80, 2.80, 2.80, 3.20, 3.40, 9999., 3.70, 2.90, 1.60, 9999., # 40-50\n 2.80, 2.80, 2.80, 3.20, 3.40, 9999., 3.70, 2.90, 1.60, 9999., # 50-60\n 2.80, 2.80, 2.80, 3.20, 3.40, 9999., 3.70, 2.90, 1.60, 9999., # 60-70\n 2.80, 2.80, 2.80, 3.20, 3.40, 9999., 3.70, 2.90, 1.60, 9999., # 70-80\n 2.80, 2.80, 2.80, 3.20, 3.40, 9999., 3.70, 2.90, 1.60, 9999., # >80\n ]\n\n # cut on (Emax - Emax2)/(Emax + Emax2) in 1st sampling\n theTool.CutDEmaxs1 += [\n 0.390, 0.390, 0.200, 0.070, 0.060, -9999, 0.070, 0.430, 0.750, -9999, # < 5\n 0.650, 0.660, 0.560, 0.460, 0.530, -9999, 0.600, 0.680, 0.750, -9999, # 5-10\n 0.790, 0.790, 0.750, 0.590, 0.530, -9999., 0.600, 0.790, 0.840, -9999., # 10-15\n 0.790, 0.790, 0.790, 0.700, 0.580, -9999., 0.600, 0.790, 0.850, -9999., # 15-20\n 0.800, 0.800, 0.820, 0.720, 0.650, -9999., 0.780, 0.790, 0.850, -9999., # 20-30\n 0.800, 0.800, 0.825, 0.720, 0.690, -9999., 0.780, 0.810, 0.880, -9999., # 30-40\n 0.800, 0.800, 0.825, 0.730, 0.690, -9999., 0.790, 0.810, 0.880, -9999., # 40-50\n 0.800, 0.800, 0.825, 0.730, 0.690, -9999., 0.790, 0.810, 0.880, -9999., # 50-60\n 0.800, 0.800, 0.825, 0.730, 0.690, -9999., 0.790, 0.810, 0.880, -9999., # 60-70\n 0.800, 0.800, 0.825, 0.730, 0.690, -9999., 0.790, 0.810, 0.880, -9999., # 70-80\n 0.800, 0.800, 0.825, 0.730, 0.690, -9999., 0.790, 0.810, 0.880, -9999., # >80\n ]\n\n # cut on Track quality cut\n theTool.usePIXOutliers = True\n theTool.useSCTOutliers = True\n\n # cut on pixel-layer hits\n theTool.CutPi += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\n # cut on precision hits\n theTool.CutSi += [7, 7, 7, 7, 7, 7, 7, 7, 7, 7]\n\n # cut on delta eta\n theTool.CutDeltaEta += [\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015\n ]\n\n # END OF USEFULL CUTS\n\n # The following ARE NOT APPLIED FOR THE H4L or the LOOSE MENU ISEM\n\n # cut on transverse impact parameter\n theTool.CutA0 += [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0]\n\n # cut on transverse impact parameter for tight selection\n theTool.CutA0Tight += [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n\n # cut on delta eta for tight selection\n theTool.CutDeltaEtaTight += [\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 
0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015\n ]\n theTool.useTRTOutliers = True\n theTool.useBLOutliers = True\n\n # cut on b-layer hits\n theTool.CutBL += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n # cut max on delta phi\n theTool.CutmaxDeltaPhi += [\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015,\n 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.015\n ]\n # cut min on deltaphi\n theTool.CutminDeltaPhi += [\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n -0.03, -0.03, -0.03, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04, -0.04,\n ]\n\n # cut min on E/P\n theTool.CutminEp += [\n 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80,\n 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80,\n 0.90, 0.90, 0.90, 0.90, 0.90, 0.90, 0.90, 0.90, 0.90, 0.90,\n 0.90, 0.90, 0.90, 0.90, 0.90, 0.90, 0.90, 0.90, 0.90, 0.90,\n 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80, 0.80,\n 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70,\n 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70,\n 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70,\n 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70,\n 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70, 0.70,\n 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00\n ]\n # cut max on E/P\n theTool.CutmaxEp += [\n 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 3.0, 3.0, 3.0, 3.0,\n 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 3.0, 3.0, 3.0, 3.0,\n 2.5, 2.5, 2.5, 2.5, 2.5, 3.0, 3.5, 3.5, 4.0, 4.0,\n 2.5, 2.5, 2.5, 2.5, 2.5, 3.0, 3.5, 3.5, 4.0, 4.0,\n 2.5, 2.5, 2.5, 2.5, 2.5, 3.0, 3.5, 3.5, 4.5, 4.5,\n 3.0, 3.0, 3.0, 3.0, 3.0, 3.5, 3.5, 4.0, 4.5, 4.5,\n 3.0, 3.0, 3.0, 3.0, 3.0, 3.5, 4.0, 5.0, 5.0, 5.0,\n 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0,\n 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0,\n 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0,\n 10., 10., 10., 10., 10., 10., 10., 10., 10., 10.\n ]\n\n # cuts on TRT\n # range of eta bins for e-ID for TRT\n 
theTool.CutBinEta_TRT += [0.1, 0.625, 1.07, 1.304, 1.752, 2.0]\n # cuts on Number of TRT hits with Outliers\n theTool.CutNumTRT += [-15., -15., -15., -15., -15., -15.]\n # cuts on TRT ratio with Outliers\n theTool.CutTRTRatio += [0.04, 0.04, 0.04, 0.05, 0.07, 0.07]\n # cuts on TRT ratio with Outliers for 90% efficiency\n theTool.CutTRTRatio90 += [0.10, 0.10, 0.125, 0.13, 0.13, 0.13]", "def bestBeta(sample,bins,N,l,u):\r\n\r\n betaGrid,df,traces=modelOnBetaGrid(sample,bins,N,l,u)\r\n minIndex=df.index[0]\r\n\r\n return betaGrid[minIndex]", "def select(individuals, n):\r\n # return selBest(individuals, n)\r\n return individuals[:n]", "def test_multi_armed_bandit_strategy_pool(self):\n environment.set_value('STRATEGY_SELECTION_METHOD', 'default')\n strategy_selection.generate_weighted_strategy_pool(\n strategy_list=strategy.AFL_STRATEGY_LIST,\n use_generator=True,\n engine_name='afl')\n environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')\n strategy_selection.generate_weighted_strategy_pool(\n strategy_list=strategy.AFL_STRATEGY_LIST,\n use_generator=True,\n engine_name='afl')" ]
[ "0.5976898", "0.5713007", "0.5497896", "0.5418031", "0.5372128", "0.5295576", "0.5200423", "0.51970977", "0.5166391", "0.5161692", "0.5148483", "0.51379246", "0.5100337", "0.5083833", "0.5076801", "0.5060033", "0.50580966", "0.5048066", "0.5047098", "0.50349486", "0.50295734", "0.5019924", "0.4996505", "0.49846426", "0.49751547", "0.4972299", "0.49436018", "0.49412787", "0.4936401", "0.4928655" ]
0.7377097
0
Creates an Ab CDR seq of length nkey consisting of 20 different symbols which are used probabilistically according to the codon number leading to each group.
def Ab_seq(RNs):
    seq = []
    for res in range(cf.nkey):
        randi = RNs.getR()
        for i in range(20):
            if randi < cf.cumprob20[i]:
                seq.append(i + 1)  # want amino acids between 1 and 20
                break
    return seq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Ag_seq(RNs):\n seq = []\n for res in range(cf.lAg):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq", "def _generate(self, n):\n # See https://en.wikipedia.org/wiki/De_Bruijn_sequence\n\n k = len(self.alphabet)\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n db(1, 1)\n return ''.join(self.alphabet[i] for i in sequence)", "def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res", "def generate(n, k=12, mnemonic=Mnemonic(\"english\")):\n ok, error = validate_n_k(n, k)\n if not ok:\n raise ValueError(error)\n\n coder = RSCodec(nsize=n, nsym=(n-k), c_exp=BIP39_SYMBOL_SIZE)\n for i in itertools.count():\n bits = random_bits(k*BIP39_SYMBOL_SIZE)\n symbols = bits_to_symbols(bits, BIP39_SYMBOL_SIZE)\n coded = coder.encode(symbols)\n phrase = symbols_to_mnemonic(coded, mnemonic)\n if not mnemonic.check(phrase):\n continue\n\n return phrase", "def aa_generator_DNA(dnaseq):\n return (translate_DNA_codon(dnaseq[n:n+3])\n for n in range(0, len(dnaseq), 3))", "def main():\n\n args = get_args()\n seq = args.seq.upper()\n codon_to_aa = {\n 'AAA': 'K',\n 'AAC': 'N',\n 'AAG': 'K',\n 'AAU': 'N',\n 'ACA': 'T',\n 'ACC': 'T',\n 'ACG': 'T',\n 'ACU': 'T',\n 'AGA': 'R',\n 'AGC': 'S',\n 'AGG': 'R',\n 'AGU': 'S',\n 'AUA': 'I',\n 'AUC': 'I',\n 'AUG': 'M',\n 'AUU': 'I',\n 'CAA': 'Q',\n 'CAC': 'H',\n 'CAG': 'Q',\n 'CAU': 'H',\n 'CCA': 'P',\n 'CCC': 'P',\n 'CCG': 'P',\n 'CCU': 'P',\n 'CGA': 'R',\n 'CGC': 'R',\n 'CGG': 'R',\n 'CGU': 'R',\n 'CUA': 'L',\n 'CUC': 'L',\n 'CUG': 'L',\n 'CUU': 'L',\n 'GAA': 'E',\n 'GAC': 'D',\n 'GAG': 'E',\n 'GAU': 'D',\n 'GCA': 'A',\n 'GCC': 'A',\n 'GCG': 'A',\n 'GCU': 'A',\n 'GGA': 'G',\n 'GGC': 'G',\n 'GGG': 'G',\n 'GGU': 'G',\n 'GUA': 'V',\n 'GUC': 'V',\n 'GUG': 'V',\n 'GUU': 'V',\n 'UAA': 'Stop',\n 'UAC': 'Y',\n 'UAG': 'Stop',\n 'UAU': 'Y',\n 'UCA': 'S',\n 'UCC': 'S',\n 'UCG': 'S',\n 'UCU': 'S',\n 'UGA': 'Stop',\n 'UGC': 'C',\n 'UGG': 'W',\n 'UGU': 'C',\n 'UUA': 'L',\n 'UUC': 'F',\n 'UUG': 'L',\n 'UUU': 'F',\n }\n\n k = 3\n\n # 1: for loop\n # protein = ''\n # for codon in [seq[i:i + k] for i in range(0, len(seq), k)]:\n # aa = codon_to_aa.get(codon, '-')\n # if aa == 'Stop':\n # break\n # protein += aa\n\n # 2: list comprehension, slice to remove Stop\n # codons = [seq[i:i + k] for i in range(0, len(seq), k)]\n # aa = [codon_to_aa.get(codon, '-') for codon in codons]\n # if 'Stop' in aa:\n # aa = aa[:aa.index('Stop')]\n # print(''.join(aa))\n\n # 3: L.C. 
-> map(), slice -> takewhile\n # codons = map(lambda i: seq[i:i + k], range(0, len(seq), k))\n # aa = map(lambda codon: codon_to_aa.get(codon, '-'), codons)\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 4: combine map()\n # aa = map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k)))\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 5: combine all\n # print(''.join(\n # takewhile(\n # lambda c: c != 'Stop',\n # map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k))))))\n\n # 6: Seq\n print(str(Seq(args.seq).translate()).replace('*', ''))", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def Sequence(nbr_by_label, nbr_by_label_test, nbr_comp, plot_graph):\n normal, muta = genere_chains(nbr_comp, 4)\n n_tot = (nbr_by_label + nbr_by_label_test + 1)\n X_n = [mutation(normal, [0.1, 0.1]) for _ in range(n_tot)]\n X_m = [mutation(muta, [0.1, 0.1]) for _ in range(n_tot)]\n X_crash_n = []\n X_crash_m = []\n for seq in X_n:\n crash = []\n for nucleotid in seq:\n crash.append((0 * (nucleotid == 'A') + 1 * (nucleotid == 'C') + 2 * (nucleotid == 'T') + 3 * (\n nucleotid == 'G')) * np.pi / 2)\n X_crash_n.append(crash)\n for seq in X_m:\n crash = []\n for nucleotid in seq:\n crash.append((0 * (nucleotid == 'A') + 1 * (nucleotid == 'C') + 2 * (nucleotid == 'T') + 3 * (\n nucleotid == 'G')) * np.pi / 2)\n X_crash_m.append(crash)\n X_n = np.array(X_crash_n)\n X_m = np.array(X_crash_m)\n if plot_graph:\n plt.scatter(X_n[:, 0][:nbr_by_label], X_n[:, 0][:nbr_by_label])\n plt.scatter(X_m[:, 0][:nbr_by_label], X_m[:, 0][:nbr_by_label])\n\n plt.title(\"ADN sequences\")\n plt.show()\n training_input = {\"N\": X_n[:nbr_by_label], \"M\": X_m[:nbr_by_label]}\n test_input = {\"N\": X_n[nbr_by_label:n_tot], \"M\": X_m[nbr_by_label:n_tot]}\n return [X_n, X_m], training_input, test_input, [\"N\", \"M\"]", "def number_to_pattern(cls, number: int, k: int, nucleotide_map: dict=None) -> str:\n if nucleotide_map is None:\n nucleotide_map = cls._nucleotide_int_map\n\n base = 4\n new_str = ''\n while number > 0:\n new_str += str(number % base)\n number = number // base\n for nuc, num in nucleotide_map.items():\n new_str = new_str.replace(str(num), nuc)\n while len(new_str) < k:\n new_str = new_str + 'A'\n return new_str[::-1]", "def unique_chains_fasta (number_to_letter, outputs_dir):\n fo = open(outputs_dir+\"/unique_chains_fasta.mfa\", \"w\")\n for key, value in number_to_letter.items():\n name_chain = \"chain_\" + value\n fo.write(\">%s\\n%s\\n\" % (name_chain, get_seq_from_pdbchain(key)))\n fo.close()\n return fo", "def gen_key():\n key = []\n chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n for i in xrange(20):\n key.append(random.choice(chars))\n return ''.join(key)", "def generate_l1ca_codes(self, prn):\n output_taps = self.l1_code_phase_assignments.loc[prn, 'CA_Phase_Select']\n g1 = self.generate_mls(10, self.g1_feedback_taps, [10])\n g2 = self.generate_mls(10, self.g2_feedback_taps, output_taps)\n ca_code = []\n for index, bit in enumerate(g1):\n ca_code.append(int((bit + g2[index]) % 2))\n return ca_code", "def gen_random_chars(n: int = 10) -> Text:\n if n < 1:\n raise Exception('Number of random chars to generate has to be > 0')\n\n return ''.join(choice(ascii_lowercase + '-_')\n for i in 
range(n))", "def __convert_group(n):\n output = ''\n\n if(n == '100'):\n output = \"CIEN \"\n elif(n[0] != '0'):\n output = CENTENAS[int(n[0]) - 1]\n\n k = int(n[1:])\n if(k <= 20):\n output += UNIDADES[k]\n else:\n if((k > 30) & (n[2] != '0')):\n output += '%sY %s' % (DECENAS[int(n[1]) - 2], UNIDADES[int(n[2])])\n else:\n output += '%s%s' % (DECENAS[int(n[1]) - 2], UNIDADES[int(n[2])])\n\n return output", "def generate_ca(valid_attributes):\n attr_list = valid_attributes.split(',')\n nb_attributes = len(attr_list)\n\n gen_g1 = G1.generator()\n gen_g2 = G2.generator()\n exp = [G1.order().random() for _ in range(nb_attributes + 1)]\n\n pk = [gen_g1] + [gen_g1 ** i for i in exp[1:]] + [gen_g2] + [gen_g2 ** i for i in exp]\n sk = gen_g1 ** exp[0]\n\n sk = [sk, pk, attr_list]\n pk = [pk, attr_list]\n\n\n return (jsonpickle.encode(pk).encode(), jsonpickle.encode(sk).encode())", "def new_barcode(num_digits=5, chars=string.digits+string.uppercase):\n return 'FLIM-'+(''.join([random.choice(chars) for _ in xrange(num_digits)]))", "def coding_strand_to_AA(dna):\n num_codons = int(len(dna)/3)\n num = 0\n list_codons = []\n aacids = ''\n while num < num_codons:\n num_start = int(num*3)\n num_end = int(num*3 + 3)\n list_codons.append(dna[num_start:num_end])\n num = num + 1\n for element in list_codons:\n thing = aa_table[element]\n aacids = aacids + thing\n return aacids", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def gen_random_id(self, n: int = 12) -> object:\n random_source = string.ascii_letters + string.digits\n id_ = random.choice(string.ascii_lowercase)\n id_ += random.choice(string.ascii_uppercase)\n id_ += random.choice(string.digits)\n\n for i in range(n):\n id_ += random.choice(random_source)\n\n _list = list(id_)\n random.SystemRandom().shuffle(_list)\n clid = ''.join(_list)\n return clid", "def _prepare(self):\n for n in range(4):\n self._code += str(random.randint(1, 9))", "def _get_id_ac_string(accession: str, gene: str, sequence_len: int) -> str:\n id_str = \"ID {GENE:<24}{REVIEW:<18}{AA_COUNT} AA.\\n\".format(\n GENE=gene,\n REVIEW=\"Unreviewed;\",\n AA_COUNT=sequence_len\n )\n acc_str = \"AC {};\".format(accession)\n return id_str + acc_str", "def codonComposition(self):#works\n return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}", "def genCharGroup(self):\n alphabet = list('abcdefghijklmnopqrstuvwxyz') #Creates a list of all the alphabet characters\n group = []\n count = 0\n while count != 3: #While the loop total does not equal 3\n i = random.choice(alphabet) #Make a random choice\n alphabet.remove(i) #Remove it from the alphabet\n group.append(i) #And add it to the group array\n count += 1 #Add one to the loop total\n return str(''.join(group)) #Return the string of 3 characters to the user", "def _generate_seq(sn):\n a, b = 0, 1\n for i in range(sn):\n yield str(a) + ' '\n a, b = b, a+b", "def translate(codon):\n \n table = { \n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', \n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', \n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', \n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', \n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', \n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', \n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', \n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', \n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', \n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', \n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', \n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 
\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', \n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', \n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*', \n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W', \n } \n \n assert codon in table.keys(), \"Not a valid codon sequence.\"\n \n return table[codon]", "def n_char_generate(self,char,n):\n return char*n", "def nmer_dictionary(self,n,dic={}):\n if self.sequence == \"\":\n self.fetchSequence()\n self.sequence = self.sequence.upper()\n for i in range(0,len(self.sequence)-n):\n subseq = self.sequence[i:][:n]\n dic[subseq]=1+dic.get(subseq,0)\n del subseq\n return dic", "def Codingfunc(N,L): #Coding:[N:number of repetitions, L:length of single/multiple sequence]\r\n C=0 #int variable containing code number\r\n if N==1:\r\n C=L-1\r\n else:\r\n C=-(L-1)*16-(N-1)\r\n #print(\"C =\",C,end=' ')\r\n \r\n return struct.pack('b',C)", "def codon_usage(self):\n codons_dict = CodonUsage.CodonsDict.copy()\n codons = [str(self.sequence[i:i+3]) for i in range(0, len(self.sequence), 3)]\n for codon in codons:\n codons_dict[codon] += 1\n return codons_dict", "def sequence(self, keys):\n out = ''\n for j in range(len(keys)):\n out += '>SEQUENCE_{}'.format(keys[j]) + '\\n'\n for i in range(len(self._d_seqs[keys[j]])):\n out += self._d_seqs[keys[j]][i] + '\\n'\n return out" ]
[ "0.60891575", "0.58298236", "0.572854", "0.57153106", "0.5714318", "0.5624596", "0.5576946", "0.5558232", "0.5541624", "0.5505016", "0.5482114", "0.5454591", "0.5376457", "0.5367231", "0.5347369", "0.5342323", "0.5340728", "0.53314793", "0.5306699", "0.5301995", "0.52934927", "0.5289704", "0.5279139", "0.52724475", "0.5255422", "0.5252428", "0.5242001", "0.52395767", "0.5237023", "0.52280945" ]
0.7048876
0
Creates an Ag epitope seq of length lAg consisting of 20 different symbols which are used probabilistically according to the codon number leading to each group. [identical to Ab_seq in this version]
def Ag_seq(RNs):
    seq = []
    for res in range(cf.lAg):
        randi = RNs.getR()
        for i in range(20):
            if randi < cf.cumprob20[i]:
                seq.append(i + 1)  # want amino acids between 1 and 20
                break
    return seq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Ab_seq(RNs):\n seq = []\n for res in range(cf.nkey):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq", "def Sequence(nbr_by_label, nbr_by_label_test, nbr_comp, plot_graph):\n normal, muta = genere_chains(nbr_comp, 4)\n n_tot = (nbr_by_label + nbr_by_label_test + 1)\n X_n = [mutation(normal, [0.1, 0.1]) for _ in range(n_tot)]\n X_m = [mutation(muta, [0.1, 0.1]) for _ in range(n_tot)]\n X_crash_n = []\n X_crash_m = []\n for seq in X_n:\n crash = []\n for nucleotid in seq:\n crash.append((0 * (nucleotid == 'A') + 1 * (nucleotid == 'C') + 2 * (nucleotid == 'T') + 3 * (\n nucleotid == 'G')) * np.pi / 2)\n X_crash_n.append(crash)\n for seq in X_m:\n crash = []\n for nucleotid in seq:\n crash.append((0 * (nucleotid == 'A') + 1 * (nucleotid == 'C') + 2 * (nucleotid == 'T') + 3 * (\n nucleotid == 'G')) * np.pi / 2)\n X_crash_m.append(crash)\n X_n = np.array(X_crash_n)\n X_m = np.array(X_crash_m)\n if plot_graph:\n plt.scatter(X_n[:, 0][:nbr_by_label], X_n[:, 0][:nbr_by_label])\n plt.scatter(X_m[:, 0][:nbr_by_label], X_m[:, 0][:nbr_by_label])\n\n plt.title(\"ADN sequences\")\n plt.show()\n training_input = {\"N\": X_n[:nbr_by_label], \"M\": X_m[:nbr_by_label]}\n test_input = {\"N\": X_n[nbr_by_label:n_tot], \"M\": X_m[nbr_by_label:n_tot]}\n return [X_n, X_m], training_input, test_input, [\"N\", \"M\"]", "def AnBn(nseq, nT, L, eps=0.5, cue=True, align=False, atfront=True):\n \n p_gram = (1-eps)\n p_nois = eps\n # here's one way to generate the sequences, \n # going to create an empty array, fill it with the valid sequences first\n seqs = -1*np.ones((nseq, nT))\n \n n = int(p_gram*nseq/len(L))\n N = 0\n for l in L:\n \n valid_seqs = np.apply_along_axis(np.repeat, 1, np.repeat([[0,1]],n,0), [l, l])\n \n if align:\n idx = np.arange(0,nT-np.mod(nT,2*l),np.floor(nT/(2*l)))\n idx = np.ones(n,nT)*idx[None,:]\n else:\n idx = np.random.rand(n,nT).argsort(1)[:,:(2*l)]\n idx = np.sort(idx,1)\n np.put_along_axis(seqs[N:N+n,:], idx, valid_seqs, axis=1)\n N+=n\n \n # now I want to add noise sequences, i.e. 
random number of A and B tokens\n # but I want to make sure that the sparseness of the sequences isn't\n # too different from the grammatical ones -- so I set that manually\n \n thr = sts.norm.ppf(2*np.mean(L)/nT)\n noise_seqs = ((np.ones(nseq-N)[:,None]*np.arange(nT) - np.random.choice(nT-5,(nseq-N,1)))>0).astype(int)\n noise_seqs[np.random.randn(nseq-N,nT)>thr] = -1\n \n seqs[N:,:] = noise_seqs\n labels = (seqs == 0).sum(1) == (seqs==1).sum(1)\n \n if cue:\n seqs = np.append(seqs, np.ones(nseq)[:,None]*2, axis=1)\n if atfront:\n # push to the front\n seqs = np.where(seqs==-1, np.nan, seqs)\n seqs = np.sort(seqs,1)\n seqs = np.where(np.isnan(seqs),-1,seqs)\n \n shf = np.random.choice(nseq,nseq,replace=False)\n seqs = seqs[shf,:]\n labels = labels[shf]\n \n return seqs, labels", "def sequence(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['sequence']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n label = \"SEQ\"\n for t in ['C','L']:\n run_label = label+'_'+t\n t1Mag_label = '{0}1MAG'.format(t)\n t2Mag_label = '{0}2MAG'.format(t)\n t3Mag_label = '{0}3MAG'.format(t)\n t1Ang_label = '{0}1ANG'.format(t)\n t2Ang_label = '{0}2ANG'.format(t)\n t3Ang_label = '{0}3ANG'.format(t)\n distillate_label = \"{0}-ALL\".format(t)\n\n # header\n inigen.emit_run_header(run_label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_1Mag_label = t1Mag_label\n dep_1Mag_name = fields['deps'][0]\n dep_1Mag_uuid = self.uuid_map[t1Mag_label]\n\n dep_2Mag_label = t2Mag_label\n dep_2Mag_name = fields['deps'][1]\n dep_2Mag_uuid = self.uuid_map[t2Mag_label]\n\n dep_3Mag_label = t3Mag_label\n dep_3Mag_name = fields['deps'][2]\n dep_3Mag_uuid = self.uuid_map[t3Mag_label]\n\n dep_1Ang_label = t1Ang_label\n dep_1Ang_name = fields['deps'][3]\n dep_1Ang_uuid = self.uuid_map[t1Ang_label]\n\n dep_2Ang_label = t2Ang_label\n dep_2Ang_name = fields['deps'][4]\n dep_2Ang_uuid = self.uuid_map[t2Ang_label]\n\n dep_3Ang_label = t3Ang_label\n dep_3Ang_name = fields['deps'][5]\n dep_3Ang_uuid = self.uuid_map[t3Ang_label]\n \n deps = [[dep_1Mag_label, dep_1Mag_name, dep_1Mag_uuid],\n [dep_2Mag_label, dep_2Mag_name, dep_2Mag_uuid],\n [dep_3Mag_label, dep_3Mag_name, dep_3Mag_uuid],\n [dep_1Ang_label, dep_1Ang_name, dep_1Ang_uuid],\n [dep_2Ang_label, dep_2Ang_name, dep_2Ang_uuid],\n [dep_3Ang_label, dep_3Ang_name, dep_3Ang_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"SEQ\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[\"ZER_{0}ANG\".format(t)] = emitted[-9][-36:]\n output_uuid_map[\"ZER_{0}MAG\".format(t)] = emitted[-8][-36:]\n output_uuid_map[\"POS_{0}ANG\".format(t)] = emitted[-7][-36:]\n output_uuid_map[\"POS_{0}MAG\".format(t)] = emitted[-6][-36:]\n output_uuid_map[\"NEG_{0}ANG\".format(t)] = emitted[-5][-36:]\n output_uuid_map[\"NEG_{0}MAG\".format(t)] = emitted[-4][-36:]\n output_uuid_map[\"UNB_{0}NEG\".format(t)] = emitted[-3][-36:]\n output_uuid_map[\"UNB_{0}ZER\".format(t)] = emitted[-2][-36:]\n\n filename = \"{0}/SEQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def design_grna(seq):\n\n transcript = {'A': 'U', 'C': 'G', 'G': 
'C', 'T': 'A'}\n grna = \"\".join(transcript[n] for n in seq)\n\n return grna", "def translate_sequence(rna_sequence, genetic_code):\n #Crate an empty list to store AA sequence:\n AA_list = []\n # Convert all rna_sequence to upper case:\n rna_sequence=rna_sequence.upper()\n # Convert all rna_sequence into a list:\n rna_list = list(rna_sequence)\n # This conditon will run if rna_sequence is at least 3 bases long, and only once it find start codon ,\n #and stop once it finds stop codon.\n while True:\n if len(rna_list) > 2:\n codon=''.join(rna_list[0:3])\n #Delete first 3 bases since its alread added as codon, thus no longer needed.\n del rna_list[0:3]\n else:\n break\n #Using genetic code dictionary to find AA for each corresponding codon:\n AA=genetic_code[codon]\n #Break loop once it finds stop codon\n if AA=='*':\n break\n #Add add translatable AA to the AA_list:\n AA_list.append(AA)\n return ''.join(AA_list)", "def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res", "def random_strings(sequence, GC_array):\r\n\r\n AT = 0\r\n GC = 0\r\n\r\n for nt in sequence:\r\n if nt == \"A\" or nt == \"T\":\r\n AT += 1\r\n elif nt == \"G\" or nt == \"C\":\r\n GC += 1\r\n\r\n probabilities = []\r\n\r\n #Calculate probability of G = probability of C = %GC / 2\r\n #Calculate probability of A = probability of T = (1 - %GC) / 2\r\n\r\n #For each consecutive base in provided sequence:\r\n #1. Convert total probability to logarithm using math.log(probability, base=10)\r\n #2. 
Total probability to be multiplied by probability of specifically that base\r\n\r\n for i in range(len(GC_array)):\r\n prob = (AT * math.log10((1 - GC_array[i])/2)) + (GC * math.log10(GC_array[i]/2))\r\n\r\n probabilities.append('%0.3f' % prob)\r\n\r\n print(*probabilities, sep= \" \")", "def generate_aa_sequence_for_disp(aa_seq):\n return re.sub(\"(.{50})\", \"\\\\1\\n\", aa_seq, 0, re.DOTALL)", "def translateORFtoAAs(self,sequence,number):\r\n AAStringfromORF = str()\r\n startingM = int()\r\n for i in range(0,len(sequence)-2,3):\r\n if sequence[i:i+3] != \"AUG\":\r\n pass\r\n else:\r\n startingM = i\r\n for i in range(startingM,len(sequence)-2,3):\r\n x = self.tabletoTranslate(sequence[i:i+3])\r\n AAStringfromORF+=x\r\n if x == \"-\":\r\n self.listofSequences.append(AAStringfromORF.rstrip(\"-\").lstrip().rstrip())\r\n AAStringfromORF = str()\r\n break", "def generate_aa_sequence(chain):\n\n chain.strip()\n chain_list = chain.split(' ')\n # TODO: What if aa is not in the lookup\n seq = [IUPAC_AA_codes[aa] for aa in chain_list]\n return ''.join(seq)", "def generate_sequence(\n size: int,\n omissions: int,\n edge_perc: Union[int, float],\n tdef,\n max_iter: int = 500,\n on_diverge: str = \"warn\",\n) -> NDArray[int]:\n _check_type(size, (\"int\",), \"size\")\n _check_type(omissions, (\"int\",), \"omissions\")\n _check_type(edge_perc, (\"numeric\",), \"edge_perc\")\n _check_type(max_iter, (\"int\",), \"max_iter\")\n _check_value(on_diverge, (\"warn\", \"raise\"), \"on_diverge\")\n if size <= 0:\n raise ValueError(\n \"Argument 'size' must be a strictly positive integer. \"\n f\"Provided: '{size}'.\"\n )\n if omissions < 0:\n raise ValueError(\n \"Argument 'omissions' must be a strictly positive integer. \"\n f\"Provided: '{omissions}'.\"\n )\n if not (0 <= edge_perc <= 100):\n raise ValueError(\n \"Argument 'edge_perc' must be a valid percentage between 0 and \"\n f\"100. Provided {edge_perc}%.\"\n )\n if max_iter <= 0:\n raise ValueError(\n \"Argument 'max_iter' must be a strictly positive integer. 
\"\n f\"Provided: '{max_iter}'.\"\n )\n\n n_edge = math.ceil(edge_perc * size / 100)\n start = [tdef.sound] * n_edge\n\n middle = [tdef.sound] * (size - omissions - 2 * n_edge)\n middle += [tdef.omission] * omissions\n random.shuffle(middle)\n iter_ = 0\n while True:\n groups = [(n, list(group)) for n, group in groupby(middle)]\n\n if all(len(group[1]) == 1 for group in groups if group[0] == tdef.omission):\n converged = True\n break\n\n if max_iter < iter_:\n msg = \"Randomize sequence generation could not converge.\"\n if on_diverge == \"warn\":\n logger.warning(msg)\n converged = False\n else:\n raise RuntimeError(msg)\n break\n\n for i, (n, group) in enumerate(groups):\n if n == tdef.sound or len(group) == 1:\n continue\n\n # find the longest group of TRIGGERS['sound']\n idx = np.argmax([len(g) if n == tdef.sound else 0 for n, g in groups])\n pos_sound = sum(len(g) for k, (_, g) in enumerate(groups) if k < idx)\n pos_sound = pos_sound + len(groups[idx][1]) // 2 # center\n\n # find position of current group\n pos_omission = sum(len(g) for k, (_, g) in enumerate(groups) if k < i)\n\n # swap first element from omissions with center of group of sounds\n middle[pos_sound], middle[pos_omission] = (\n middle[pos_omission],\n middle[pos_sound],\n )\n\n break\n\n iter_ += 1\n\n # sanity-check\n if converged:\n assert all(len(group) == 1 for n, group in groups if n == tdef.omission)\n assert not any(\n middle[i - 1] == middle[i] == tdef.omission for i in range(1, len(middle))\n )\n\n end = [tdef.sound] * n_edge\n return np.array(start + middle + end)", "def generate_random_sequence():\n\n seq = []\n [seq.append(np.random.choice(cs.DNA_BASES)) for _ in range(cs.LENGTH)]\n\n return seq", "def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein", "def generate_sequence(sequence):\n if sequence[len(sequence) - 1] == 'Z':\n return sequence + \"A\"\n\n s = SeqGen(26, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n\n if sequence[0].isdigit() and sequence[0] == '9':\n sequence = list(sequence)\n sequence[0] = 'A'\n sequence = ''.join(sequence)\n elif sequence[0] == 'Z':\n sequence = list(sequence)\n sequence[0] = '0'\n sequence = ''.join(sequence)\n sequence = sequence[0] + increment(sequence[1:], s)\n else:\n sequence = list(sequence)\n sequence[0] = (chr(ord(sequence[0]) + 1))\n sequence = ''.join(sequence)\n return sequence", "def __init__(self, length, alphabet=IUPAC.unambiguous_dna):\n seq_str = 
self.SampleLetters(alphabet.letters, length)\n \n Seq.__init__(self, seq_str.upper(), alphabet)", "def aa_generator_DNA(dnaseq):\n return (translate_DNA_codon(dnaseq[n:n+3])\n for n in range(0, len(dnaseq), 3))", "def gen_seq(self,ntrials=20,pm_trial_position=None):\n # insert ranomly positioned pm trials\n if type(pm_trial_position)==type(None):\n ntrials -= 1+self.num_pm_trials\n pm_trial_position = np.random.randint(self.min_start_trials,ntrials,self.num_pm_trials) \n else:\n ntrials -= 1+len(pm_trial_position)\n pm_trial_position = pm_trial_position\n # generate og stim\n seq = np.random.randint(0,self.ntokens_og,ntrials)\n X = np.insert(seq,[0,*pm_trial_position],self.pm_token)\n # form Y \n Xroll = np.roll(X,self.nback)\n Y = (X == Xroll).astype(int) # nback trials\n Y[X==self.pm_token]=2 # pm trials\n return X,Y", "def main():\n\n args = get_args()\n seq = args.seq.upper()\n codon_to_aa = {\n 'AAA': 'K',\n 'AAC': 'N',\n 'AAG': 'K',\n 'AAU': 'N',\n 'ACA': 'T',\n 'ACC': 'T',\n 'ACG': 'T',\n 'ACU': 'T',\n 'AGA': 'R',\n 'AGC': 'S',\n 'AGG': 'R',\n 'AGU': 'S',\n 'AUA': 'I',\n 'AUC': 'I',\n 'AUG': 'M',\n 'AUU': 'I',\n 'CAA': 'Q',\n 'CAC': 'H',\n 'CAG': 'Q',\n 'CAU': 'H',\n 'CCA': 'P',\n 'CCC': 'P',\n 'CCG': 'P',\n 'CCU': 'P',\n 'CGA': 'R',\n 'CGC': 'R',\n 'CGG': 'R',\n 'CGU': 'R',\n 'CUA': 'L',\n 'CUC': 'L',\n 'CUG': 'L',\n 'CUU': 'L',\n 'GAA': 'E',\n 'GAC': 'D',\n 'GAG': 'E',\n 'GAU': 'D',\n 'GCA': 'A',\n 'GCC': 'A',\n 'GCG': 'A',\n 'GCU': 'A',\n 'GGA': 'G',\n 'GGC': 'G',\n 'GGG': 'G',\n 'GGU': 'G',\n 'GUA': 'V',\n 'GUC': 'V',\n 'GUG': 'V',\n 'GUU': 'V',\n 'UAA': 'Stop',\n 'UAC': 'Y',\n 'UAG': 'Stop',\n 'UAU': 'Y',\n 'UCA': 'S',\n 'UCC': 'S',\n 'UCG': 'S',\n 'UCU': 'S',\n 'UGA': 'Stop',\n 'UGC': 'C',\n 'UGG': 'W',\n 'UGU': 'C',\n 'UUA': 'L',\n 'UUC': 'F',\n 'UUG': 'L',\n 'UUU': 'F',\n }\n\n k = 3\n\n # 1: for loop\n # protein = ''\n # for codon in [seq[i:i + k] for i in range(0, len(seq), k)]:\n # aa = codon_to_aa.get(codon, '-')\n # if aa == 'Stop':\n # break\n # protein += aa\n\n # 2: list comprehension, slice to remove Stop\n # codons = [seq[i:i + k] for i in range(0, len(seq), k)]\n # aa = [codon_to_aa.get(codon, '-') for codon in codons]\n # if 'Stop' in aa:\n # aa = aa[:aa.index('Stop')]\n # print(''.join(aa))\n\n # 3: L.C. 
-> map(), slice -> takewhile\n # codons = map(lambda i: seq[i:i + k], range(0, len(seq), k))\n # aa = map(lambda codon: codon_to_aa.get(codon, '-'), codons)\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 4: combine map()\n # aa = map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k)))\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 5: combine all\n # print(''.join(\n # takewhile(\n # lambda c: c != 'Stop',\n # map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k))))))\n\n # 6: Seq\n print(str(Seq(args.seq).translate()).replace('*', ''))", "def mrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnalen += int(fields[4]) - int(fields[3]) + 1\n accmatch = re.search(r'accession=([^;\\n]+)', fields[8])\n assert accmatch, 'Unable to parse mRNA accession: %s' % fields[8]\n mrnaacc = accmatch.group(1)\n elif entry.startswith('###'):\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'mature mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n values = '%s %d %.3f %.3f %.3f' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0", "def _get_id_ac_string(accession: str, gene: str, sequence_len: int) -> str:\n id_str = \"ID {GENE:<24}{REVIEW:<18}{AA_COUNT} AA.\\n\".format(\n GENE=gene,\n REVIEW=\"Unreviewed;\",\n AA_COUNT=sequence_len\n )\n acc_str = \"AC {};\".format(accession)\n return id_str + acc_str", "def sequence(self):\n\n\t\tseq = \"\"\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tseq += res.aa1()\n\n\t\treturn seq", "def random_seq(length, nucleic_acid='DNA'):\n \n if nucleic_acid == 'DNA':\n alphabet = ('A','C','T','G')\n elif nucleic_acid == 'RNA':\n alphabet = ('A','C','U','G')\n\n so_far = ''\n for i in range(length):\n so_far += random.sample(alphabet, 1)[0]\n return so_far", "def createAlignment(sequences, alphabet):\n align = Alignment(alphabet)\n counter = 0\n for sequence in sequences:\n name = \"sequence\" + str(counter)\n align.add_sequence(name, sequence)\n counter+=1\n return align", "def __init__(self, seq, peptide):\r\n self.seq = seq # original DNA sequence\r\n self.peptide = peptide # original peptide sequence\r\n self.allPepSeqs = [] # list to hold all possible nuc sequences based on the peptide sequence\r\n self.codonTable = { # holds all amino acids and their associated codons\r\n 'F': ['TTT', 'TTC'], 'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],\r\n 'Y': ['TAT', 'TAC'], 'C': ['TGT', 'TGC'], 'L': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\r\n '-': ['TAA', 'TGA', 'TAG'], 'W': ['TGG'], 'P': ['CCT', 'CCC', 'CCA', 'CCG'],\r\n 'H': ['CAT', 'CAC'], 'R': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Q': ['CAA', 'CAG'],\r\n 'I': ['ATT', 'ATC', 'ATA'], 'T': ['ACT', 'ACC', 'ACA', 'ACG'], 'N': ['AAT', 'AAC'],\r\n 'K': ['AAA', 'AAG'], 'M': ['ATG'], 'V': ['GTT', 'GTC', 'GTA', 'GTG'],\r\n 'A': ['GCT', 'GCC', 'GCA', 'GCG'], 'D': ['GAT', 'GAC'], 'G': ['GGT', 'GGC', 'GGA', 
'GGG'],\r\n 'E': ['GAA', 'GAG']\r\n }", "def intuitive_abel_seq(p,n):\n assert n>=2, \"Carleman matrix must at least be of size 2 to retrieve the coefficients.\"\n B=p.carleman_matrix(n)-diagonal_matrix([1]*(n))\n x=B[range(1,n),range(n-1)].solve_left(matrix([[1] + [0]*(n-2)]))\n return [-1]+x[0].list()", "def seq_gc(seq, mapped_only=True):\n if not isinstance(seq, str):\n raise ValueError(\"reformat input sequence as a str\")\n g = seq.count(\"G\")\n g += seq.count(\"g\")\n c = seq.count(\"C\")\n c += seq.count(\"c\")\n nbases = len(seq)\n if mapped_only:\n n = seq.count(\"N\")\n n += seq.count(\"n\")\n nbases -= n\n return (g + c) / nbases if nbases > 0 else np.nan", "def GetPseudoAAC2(ProteinSequence,lamda=30,weight=0.05,AAP=[_Hydrophobicity,_hydrophilicity]):\n\trightpart=[]\n\tfor i in range(lamda):\n\t\trightpart.append(GetSequenceOrderCorrelationFactor(ProteinSequence,i+1,AAP))\n\t\n\tresult={}\n\ttemp=1+weight*sum(rightpart)\n\tfor index in range(20,20+lamda):\n\t\tresult['PAAC'+str(index+1)]=round(weight*rightpart[index-20]/temp*100,3)\n\t\n\treturn result", "def test_degap_fasta_aln(self):\r\n\r\n test_aln = [(\"a\", \"AAAAAAAAAGGGG\"),\r\n (\"b\", \"-A-A-G-G-A-G-C.\"),\r\n ('c', \"..-----------\"),\r\n ('d', \"--------AAAAAAA\"),\r\n ('e', \"\")]\r\n\r\n expected_result = map(lambda a_b: DNASequence(a_b[1],\r\n id=a_b[0]),\r\n [(\"a\", \"AAAAAAAAAGGGG\"),\r\n (\"b\", \"AAGGAGC\"),\r\n ('c', \"\"),\r\n ('d', \"AAAAAAA\"),\r\n ('e', \"\")])\r\n\r\n self.assertEqual(list(degap_fasta_aln(test_aln)), expected_result)\r\n\r\n self.assertEqual(list(degap_fasta_aln([])), [])", "def create_seqeunce_helper(i, text, dsl, char_count, char_to_n, extra, length = seq_length):\n\n seq_int = [] # Sequence mapped to integers\n output_seq = np.zeros((length, char_count)) # Output sequence which will become one item in input array \n\n # Get the next sequence and map its characters to integers\n for v in text[i * length + extra : (i + 1) * length + extra]:\n # If the seed_text is missing a character we append 0\n if v in char_to_n:\n seq_int.append(char_to_n[v])\n else:\n seq_int.append(0)\n\n # For character in sequence\n for j in range(length):\n # Set column corrpsonding to that character to 1\n output_seq[j][seq_int[j]] = 1.0 \n\n return output_seq" ]
[ "0.6658332", "0.6241003", "0.6202885", "0.6068937", "0.603792", "0.5953803", "0.58824986", "0.5805228", "0.5768547", "0.5682909", "0.5623935", "0.5590501", "0.5581104", "0.5535313", "0.5436303", "0.5406636", "0.5395256", "0.5393105", "0.53856844", "0.53707266", "0.5350533", "0.5347609", "0.53240395", "0.5308318", "0.52999836", "0.52547586", "0.5205258", "0.5192391", "0.51788765", "0.51670116" ]
0.75282496
0
Calculates the normalized binding energy given Ab and Ag sequences, the binding matrix, and the best and worst binding values possible for the given Ag (required for normalization). A value of 1 then means the best binder and 0 the worst.
def E_norm(Ab, Ag, top, bottom):

    # calculate binding energy before normalization
    Esum = sum([cf.TD20[int(Ab[i]) - 1][int(Ag[i]) - 1]
                for i in range(len(Ab))])

    # normalize using the supplied top and bottom values
    Enormal = (Esum - bottom) / (top - bottom)

    return Enormal
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def E_best(Ab, AgEpitope):\n\n E = E_norm(Ab, AgEpitope[0], AgEpitope[1], AgEpitope[2])\n\n return E", "def _sigma_ee_rel(self,gam,eps):\n A = 1 - 8 / 3 * (gam - 1)**0.2 / (gam + 1) * (eps / gam)**(1./3.)\n\n return (self._sigma_1(gam,eps) + self._sigma_2(gam,eps)) * A", "def update_model(self,update_dict,eta=None):\n\n A = self.A.get_value()\n param_max = np.max(np.abs(A),axis=0)\n\n if eta is None:\n update_max = np.max(np.abs(update_dict['dA']),axis=0)\n A -= update_dict['dA']\n else:\n update_max = eta*np.max(np.abs(update_dict['dA']),axis=0)\n A -= eta*update_dict['dA']\n\n update_max = np.max(update_max/param_max)\n\n A = self.normalize_A(A)\n self.A.set_value(A)\n\n return update_max", "def fit_gaussian(position, energy, dx=0.005, a=2, b=1.5, db=0.01, tolerance=0.05, max_iterations=1000):\n min_energy, max_energy = min(energy), max(energy)\n x_start, x_range = min(position), max(position) - min(position)\n x_gauss = np.arange(0, x_range, dx)\n f_gauss = np.exp(-a * (x_range / 2 - x_gauss) ** 2 + b)\n delta_energy = abs(max(f_gauss) - max_energy)\n b_direction = np.sign(max_energy - max(f_gauss))\n print('E_WHAM: %.3f | E_GAUSS: %.3f | b_direction: %i' % (max_energy, max(f_gauss), b_direction))\n for i in range(max_iterations):\n b = b + b_direction * db\n f_gauss_trial = np.exp(-a * (x_range / 2 - x_gauss) ** 2 + b)\n delta_energy_trial = abs(max(f_gauss_trial) - max_energy)\n if delta_energy_trial < tolerance:\n f_gauss = f_gauss_trial\n print('Found b value: %.2f with dE: %.3f within tolerance in %i iterations' % (b, delta_energy, i))\n break\n elif delta_energy_trial < delta_energy:\n f_gauss = f_gauss_trial\n delta_energy = delta_energy_trial\n print('Finished fitting. %i iterations | dE: %.3f | b_final: %.2f' % (i, delta_energy, b))\n return (x_gauss + x_start, f_gauss)", "def eval_intrares_energy(self, res, pose, sf, emap):\n\t\tpose = pose\n\t\temv = EMapVector()\n\t\tsf_env.eval_cd_1b(res,pose,emv)\n\t\tweighted_score_env = emv[env]*sec_struct_weight[pose.secstruct()[res.seqpos()-1]]\n\t\tsf_cbeta.eval_cd_1b(res,pose,emv)\n\t\tweighted_score_cbeta = emv[cbeta]*sec_struct_weight[pose.secstruct()[res.seqpos()-1]]\n\t\tweighted_score = -1*(weighted_score_env + weighted_score_cbeta)\n\t\temap.set(self.scoreType, weighted_score)", "def estimate_wf_energy(self):\n nblocks = 50\n blocksize = int(len(self.local_energies) / nblocks)\n enmean = 0\n enmeansq = 0\n enmean_unblocked = 0\n enmeansq_unblocked = 0\n\n for b in range(nblocks):\n eblock = 0.0\n for j in range(b * blocksize, (b + 1) * blocksize):\n eblock += self.local_energies[j].real\n delta = self.local_energies[j].real - enmean_unblocked\n enmean_unblocked += delta / (j + 1)\n delta2 = self.local_energies[j].real - enmean_unblocked\n # delta != delta2 because of update to enmean_unblocked\n enmeansq_unblocked += delta * delta2\n eblock /= blocksize\n delta = eblock - enmean\n enmean += delta / (b + 1)\n delta2 = eblock - enmean\n # delta != delta2 because of update to enmean\n enmeansq += delta * delta2\n\n enmeansq /= (nblocks - 1)\n enmeansq_unblocked /= (nblocks * blocksize - 1)\n est_avg = enmean / self.n_visible\n est_error = math.sqrt(enmeansq / nblocks) / self.n_visible\n self.nqs_energy = np.squeeze(est_avg)\n self.nqs_energy_err = np.squeeze(est_error)\n\n energy_report = 'Estimated average energy per spin: {} +/- {}'\n print(energy_report.format(est_avg, est_error))\n bin_report = 'Estimate from binning analysis. 
' + \\\n '{} bins of {} samples each'\n print(bin_report.format(nblocks, blocksize))\n autocorrelation = 'Estimated autocorrelation time is {}'\n self.correlation_time = 0.5 * blocksize * enmeansq / enmeansq_unblocked\n print(autocorrelation.format(self.correlation_time))", "def worst_B(Ag):\n bottom = 0\n for i in range(len(Ag)):\n etop = np.max(cf.TD20[int(Ag[i]) - 1])\n bottom += etop\n return bottom", "def residue_pair_energy(self, res1, res2, pose, sf, emap):\n\t\tpose = pose\n\t\temv = EMapVector()\n\t\tsf_pair.eval_ci_2b(res1,res2,pose,emv)\n\t\tweighted_score = -1*(emv[pair]*sec_struct_weight[pose.secstruct()[res1.seqpos()-1]]*sec_struct_weight[pose.secstruct()[res2.seqpos()-1]])\n\t\temap.set(self.scoreType, weighted_score)", "def norm_bound(self, input_mags):\n return input_mags[0]", "def _excitonic_reorg_energy(self, SS, AG, n):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n rg = 0.0\n \n # electronic states corresponding to single excited states\n elst = numpy.where(AG.which_band == 1)[0]\n for el1 in elst:\n reorg = cfm.get_reorganization_energy(el1-1,el1-1)\n for kk in AG.vibindices[el1]:\n rg += ((SS[kk,n]**2)*(SS[kk,n]**2)*reorg)\n return rg", "def algo(GENE_VALUES_MATRIX):\n\n\tA = GENE_VALUES_MATRIX\n\n\tAA = np.zeros_like(A)\n\n\tI = np.argsort(A,axis=0)\n\n\tAA[I,np.arange(A.shape[1])] = np.mean(A[I,np.arange(A.shape[1])],axis=1)[:,np.newaxis]\n\n\treturn AA", "def abv(og, fg):\n return abw(og, fg) * fg / 0.794", "def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A", "def _calc_energy_perturb( self, V_a, eos_d ):\n\n fname = 'energy'\n scale_a, paramkey_a = self.get_param_scale\\\n ( eos_d, apply_expand_adj=self.expand_adj )\n Eperturb_a = []\n for paramname in paramkey_a:\n iEperturb_a = self.param_deriv( fname, paramname, V_a, eos_d)\n Eperturb_a.append(iEperturb_a)\n\n Eperturb_a = np.array(Eperturb_a)\n\n return Eperturb_a, scale_a, paramkey_a", "def _calc_energy( self, V_a, eos_d ):\n pass", "def best_B(Ag):\n top = 0\n for i in range(len(Ag)):\n etop = np.min(cf.TD20[int(Ag[i]) - 1])\n top += etop\n return top", "def _normalize_input(self, Gs, dGs):\n Gs_norm = copy.deepcopy(Gs)\n dGs_norm = copy.deepcopy(dGs)\n # Iterate over atomtypes\n for t in self.atomtypes:\n if self.normalize_input == 'norm':\n norm_i = np.linalg.norm(Gs[t], axis=1)\n Gs_norm[t] = Gs[t]/norm_i[:, np.newaxis]\n dGs_norm[t] = np.einsum('ijkl,i->ijkl', dGs[t], 1.0/norm_i)\n else:\n Gs_norm[t] = (Gs[t]-self.Gs_norm1[t])/self.Gs_norm2[t]\n dGs_norm[t] = np.einsum('ijkl,j->ijkl', dGs[t],\n 1.0/self.Gs_norm2[t])\n return Gs_norm, dGs_norm", "def compute_norm(self):\n\n # logger.info(\" Normalization factor:\")\n\n # loop over all the complexes in the database\n first = True\n for comp in tqdm(self.index_complexes):\n fname, molname = comp[0], comp[1]\n\n # get the feature/target\n if self.mapfly:\n feature, target = self.map_one_molecule(\n fname, mol=molname)\n else:\n feature, target = self.load_one_molecule(\n fname, mol=molname)\n\n # create the norm isntances at the first passage\n if first:\n self.param_norm = {'features': [], 'targets': None}\n for ifeat in range(feature.shape[0]):\n self.param_norm['features'].append(NormParam())\n self.param_norm['targets'] = MinMaxParam()\n first = False\n\n # update the norm instances\n for ifeat, mat in enumerate(feature):\n self.param_norm['features'][ifeat].add(\n 
np.mean(mat), np.var(mat))\n self.param_norm['targets'].update(target)\n\n # process the std of the features and make array for fast access\n nfeat, ncomplex = len(\n self.param_norm['features']), len(self.index_complexes)\n self.feature_mean, self.feature_std = [], []\n for ifeat in range(nfeat):\n\n # process the std and check\n self.param_norm['features'][ifeat].process(ncomplex)\n if self.param_norm['features'][ifeat].std == 0:\n logger.info(' Final STD Null. Changed it to 1')\n self.param_norm['features'][ifeat].std = 1\n\n # store as array for fast access\n self.feature_mean.append(\n self.param_norm['features'][ifeat].mean)\n self.feature_std.append(\n self.param_norm['features'][ifeat].std)\n\n self.target_min = self.param_norm['targets'].min[0]\n self.target_max = self.param_norm['targets'].max[0]\n\n logger.info(f'{self.target_min}, {self.target_max}')", "def EVBMF(Y, sigma2=None, H=None): \n L,M = Y.shape #has to be L<=M\n\n if H is None:\n H = L\n\n alpha = L/M\n tauubar = 2.5129*np.sqrt(alpha)\n \n #SVD of the input matrix, max rank of H\n _,s,_ = torch.svd(Y)\n s = s[:H]\n\n #Calculate residual\n residual = 0.\n if H<L:\n # residual = np.sum(np.sum(Y**2)-np.sum(s**2))\n residual = torch.sum(torch.sum(Y**2)-torch.sum(s**2))\n\n #Estimation of the variance when sigma2 is unspecified\n if sigma2 is None: \n xubar = (1+tauubar)*(1+alpha/tauubar)\n eH_ub = int(np.min([np.ceil(L/(1+alpha))-1, H]))-1\n\n upper_bound = (torch.sum(s**2)+residual)/(L*M)\n lower_bound = np.max([s[eH_ub+1]**2/(M*xubar), torch.mean(s[eH_ub+1:]**2)/M])\n\n scale = 1.#/lower_bound\n s = s*np.sqrt(scale)\n residual = residual*scale\n lower_bound = float(lower_bound)*scale\n upper_bound = float(upper_bound)*scale\n\n sigma2_opt = minimize_scalar(EVBsigma2, args=(L,M,s,residual,xubar), bounds=[lower_bound, upper_bound], method='Bounded')\n sigma2 = sigma2_opt.x\n\n #Threshold gamma term\n threshold = np.sqrt(M*sigma2*(1+tauubar)*(1+alpha/tauubar))\n\n pos = torch.sum(s>threshold)\n if pos == 0: return np.array([])\n #Formula (15) from [2]\n d = torch.mul(s[:pos]/2, 1-(L+M)*sigma2/s[:pos]**2 + torch.sqrt( (1-((L+M)*sigma2)/s[:pos]**2)**2 - (4*L*M*sigma2**2)/s[:pos]**4) )\n return torch.diag(d)", "def binder(self):\n\t\tfor i in range(len(self.EH)):\n\t\t\tif self.EH[i] and self.EHr[i]:\n\t\t\t\tself.E2[self.EH[i][0]] += self.EHr[i]", "def energy_exp(self, rbm_vis, rbm_hid):\n logger.debug(\"GumBolt::energy_exp\")\n \n # Broadcast W to (pcd_batchSize * nVis * nHid)\n w, vbias, hbias = self.prior.weights, self.prior.visible_bias, self.prior.hidden_bias\n w = w + torch.zeros((rbm_vis.size(0),) + w.size(), device=rbm_vis.device)\n vbias = vbias.to(rbm_vis.device)\n hbias = hbias.to(rbm_hid.device)\n \n # Prepare H, V for torch.matmul()\n # Change V.size() from (batchSize * nVis) to (batchSize * 1 * nVis)\n vis = rbm_vis.unsqueeze(2).permute(0, 2, 1)\n # Change H.size() from (batchSize * nHid) to (batchSize * nHid * 1)\n hid = rbm_hid.unsqueeze(2)\n \n batch_energy = (- torch.matmul(vis, torch.matmul(w, hid)).reshape(-1) \n - torch.matmul(rbm_vis, vbias)\n - torch.matmul(rbm_hid, hbias))\n \n return torch.mean(batch_energy, 0)", "def energy_s(s_mat):\n \n itr = int(np.shape(s_mat)[0])\n sums = sum(s_mat)\n norm_1 = s_mat[0]/sums\n norm_c = []\n norm_i = []\n \n for i in range(itr):\n \n norm_c.append(sum(s_mat[0:i+1])/sums)\n norm_i.append(s_mat[i]/sums)\n return(norm_1, norm_c, norm_i)", "def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = 
(self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy", "def _sigma_ep(self,gam,eps):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return self._sigma_1(gam,eps)", "def viterbiMeansEstimate(self):\n for i in range(self.noOfEmmittingStates):\n self.outputProbabilities[i,0] = \\\n self.observationSequence[0, nonzero(self.mostLikelyPath ==\n i+1)[1]-1].mean()", "def energyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n pwm_length = len(pwm_dictionary[\"A\"])\n energy_list = []\n pwm_dictionary_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(len(seq) - 1):\n energy = 0\n energy_rc = 0\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq):\n energy += 0.25\n energy_rc += 0.25\n else:\n energy += pwm_dictionary[seq[j + i]][j]\n energy_rc += pwm_dictionary_rc[seq[j + i]][j]\n\n energy_list.append(1 / (1 + (exp(energy))))\n energy_list.append(1 / (1 + (exp(energy_rc))))\n energy_score = min(energy_list)\n return energy_score", "def max_evidence(self):\n self.A = np.linalg.inv(self.Sn)\n A_eigval = np.linalg.eigvals(self.A)\n gamma = 0\n for i in range(len(A_eigval)):\n gamma += A_eigval[i]/(self.alpha + A_eigval[i])\n new_alpha = gamma/([email protected])\n\n sum = 0\n for i in range(self.n):\n sum +=(self.t[i][email protected]_matrix[i])**2\n new_beta = 1/((1/(self.n-gamma))*sum)\n\n return new_alpha, new_beta", "def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum", "def get_total_BMA_effect_size(self):\n \n if self.total_bma_es is None:\n # clean up these long expressions on Isle 2\n log_evidences = [self.results[kernel].summary(b=self.b)['evidence']['md'] \n for kernel in self.kernel_dict.keys()] + \\\n [self.results[kernel].summary(b=self.b)['evidence']['mc'] \n for kernel in self.kernel_dict.keys()]\n \n M = len(log_evidences)\n Z = logSumExp(log_evidences)\n evidences = np.exp(log_evidences - Z)\n disc_stats = [self.results[kernel].summary(b=self.b)['es_disc_stats'] \n for kernel in self.kernel_dict.keys()]\n nsamples = 50000\n samples = list() \n for i in range(int(M/2)):\n samples += list(np.random.normal(loc=disc_stats[i][0], \n scale=disc_stats[i][1], \n size=int(nsamples*evidences[i])))\n samples += list(np.zeros(nsamples - len(samples)))\n \n if np.sum(np.abs(samples))==0:\n xrange = np.linspace(-2, 2, 500)\n ix = np.argmin((xrange-self.b)**2)\n es_bma = np.zeros((500))\n es_bma[ix] = 1.0/ (xrange[1] - xrange[0])\n else: \n kde_fit = stats.gaussian_kde(samples, bw_method='silverman')\n xrange = np.linspace(np.min(samples), np.max(samples), 500)\n es_bma = kde_fit(xrange)\n self.total_bma_es = np.sum(xrange*es_bma) * (xrange[1]-xrange[0])\n self.total_bma_pdf = (xrange, es_bma)\n return self.total_bma_es", "def score(matrix,seq,ns=True):\n #specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])\n specific_binding = 0\n for i in xrange(len(matrix)): \n specific_binding += matrix[i][base_dict[seq[i]]]\n if ns:\n return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta\n else:\n return specific_binding" ]
[ "0.5989815", "0.53740555", "0.5230325", "0.5117697", "0.50739473", "0.49998295", "0.49388465", "0.49281248", "0.49246195", "0.4903912", "0.4902104", "0.488608", "0.48850486", "0.4882683", "0.48631883", "0.4862339", "0.48141176", "0.4811674", "0.48107347", "0.48089716", "0.48011422", "0.47942543", "0.47849748", "0.4775333", "0.47675428", "0.4761964", "0.47591427", "0.47562096", "0.47430134", "0.4710862" ]
0.7355312
0
Given an Ab and a prepared list containing the epitope as well as its best and worst binding partner energies, this function calculates the normalized binding energy of the Ab towards the epitope. It then returns the binding energy value.
def E_best(Ab, AgEpitope):

    E = E_norm(Ab, AgEpitope[0], AgEpitope[1], AgEpitope[2])

    return E
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def E_norm(Ab, Ag, top, bottom):\n\n # calculate binding energy before normalization\n Esum = sum([cf.TD20[int(Ab[i]) - 1][int(Ag[i]) - 1]\n for i in range(len(Ab))])\n\n # normalize using the supplied top and bottom values\n Enormal = (Esum - bottom) / (top - bottom)\n\n return Enormal", "def _calc_energy_perturb( self, V_a, eos_d ):\n\n fname = 'energy'\n scale_a, paramkey_a = self.get_param_scale\\\n ( eos_d, apply_expand_adj=self.expand_adj )\n Eperturb_a = []\n for paramname in paramkey_a:\n iEperturb_a = self.param_deriv( fname, paramname, V_a, eos_d)\n Eperturb_a.append(iEperturb_a)\n\n Eperturb_a = np.array(Eperturb_a)\n\n return Eperturb_a, scale_a, paramkey_a", "def estimate_wf_energy(self):\n nblocks = 50\n blocksize = int(len(self.local_energies) / nblocks)\n enmean = 0\n enmeansq = 0\n enmean_unblocked = 0\n enmeansq_unblocked = 0\n\n for b in range(nblocks):\n eblock = 0.0\n for j in range(b * blocksize, (b + 1) * blocksize):\n eblock += self.local_energies[j].real\n delta = self.local_energies[j].real - enmean_unblocked\n enmean_unblocked += delta / (j + 1)\n delta2 = self.local_energies[j].real - enmean_unblocked\n # delta != delta2 because of update to enmean_unblocked\n enmeansq_unblocked += delta * delta2\n eblock /= blocksize\n delta = eblock - enmean\n enmean += delta / (b + 1)\n delta2 = eblock - enmean\n # delta != delta2 because of update to enmean\n enmeansq += delta * delta2\n\n enmeansq /= (nblocks - 1)\n enmeansq_unblocked /= (nblocks * blocksize - 1)\n est_avg = enmean / self.n_visible\n est_error = math.sqrt(enmeansq / nblocks) / self.n_visible\n self.nqs_energy = np.squeeze(est_avg)\n self.nqs_energy_err = np.squeeze(est_error)\n\n energy_report = 'Estimated average energy per spin: {} +/- {}'\n print(energy_report.format(est_avg, est_error))\n bin_report = 'Estimate from binning analysis. 
' + \\\n '{} bins of {} samples each'\n print(bin_report.format(nblocks, blocksize))\n autocorrelation = 'Estimated autocorrelation time is {}'\n self.correlation_time = 0.5 * blocksize * enmeansq / enmeansq_unblocked\n print(autocorrelation.format(self.correlation_time))", "def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy", "def bind_energy(self):\n return self._bind_energy", "def get_econs(self):\n eham = self.beads.vpath*self.nm.omegan2 + self.nm.kin + self.forces.pot\n eham += self.bias.pot # bias\n for e in self._elist:\n eham += e.get()\n\n return eham + self.eens", "def binder(self):\n\t\tfor i in range(len(self.EH)):\n\t\t\tif self.EH[i] and self.EHr[i]:\n\t\t\t\tself.E2[self.EH[i][0]] += self.EHr[i]", "def energy_balance_func(self):\n residual = []\n T_in = T_mix_ph(self.inl[0].get_flow(), T0=self.inl[0].T.val_SI)\n for o in self.outl:\n residual += [T_in - T_mix_ph(o.get_flow(), T0=o.T.val_SI)]\n return residual", "def _calc_energy( self, V_a, eos_d ):\n pass", "def calculate_energy(self):\n temp_e = 0\n\n for i in range(0,self.neuron_count):\n for j in range(0, self.neuron_count):\n if i != j:\n temp_e += self.get_weight(i, j) * self.current_state[i] * \\\n self.current_state[j]\n return -1 * temp_e / 2", "def absorption_energy_eV(self):\n return self._absorption_energy_eV.copy()", "def rel_energies(self) -> np.ndarray:\n if len(self) == 0:\n logger.warning(\"Cannot determine relative energies with no points\")\n return np.array([])\n\n return self.units.conversion * (self.energies - np.min(self.energies))", "def potential_energy(xy, BL, bo=1., kL=1.):\n bL = bond_length_list(xy, BL)\n bU = 0.5 * sum(kL * (bL - bo) ** 2)\n return bU", "def get_abnormal_price_values(ls_ls_prices, lower_bound, upper_bound):\n ls_abnormal_prices = []\n for indiv_ind, ls_prices in enumerate(ls_ls_prices):\n day_ind = 0\n while day_ind < len(ls_prices):\n if (ls_prices[day_ind] < lower_bound) or (ls_prices[day_ind] > upper_bound):\n relative_day = 0\n ls_day_inds = []\n while (day_ind + relative_day < len(ls_prices)) and\\\n (ls_prices[day_ind] == ls_prices[day_ind + relative_day]):\n ls_day_inds.append(day_ind + relative_day)\n relative_day += 1\n ls_abnormal_prices.append((indiv_ind, ls_prices[day_ind], ls_day_inds))\n day_ind += relative_day\n else:\n day_ind += 1\n return ls_abnormal_prices", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def _normalize(self):\n\n n = len(self.e2[0])\n E = []\n\n for e2 in self.e2:\n if len(e2) != n:\n print 'WARNING: non consistent length in error statistics!!!'\n E.append(np.nansum(np.sqrt(e2))) # temporal aggregation\n\n E = np.asarray(E)\n EM = E.mean() # take square root, as e2 is still the squared error!\n self.e_norm = (E - EM) / EM # see Glecker et al, eq.2", "def bethe_free_energy(self, potential):\n xn, xe, lpn, lpe, alpha = self(None, full_out=False)\n fn, fe = potential((xn, xe))\n bfe = -(tf.reduce_sum((fn + self.tw * lpn) * self.wn * alpha, [2, 3, 4]) +\n 
tf.reduce_sum((fe - lpe) * self.we * alpha, [2, 3, 4]))\n return bfe", "def _emiss_ee(self,Eph):\n if self.weight_ee == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_ee(gam,Eph),\n self._gam, axis=0)\n return emiss", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def bond_strain_list(xy, BL, bo):\n bL = bond_length_list(xy, BL)\n # print 'len(bL) = ', len(bL)\n # print 'len(bo) = ', len(bo)\n bs = (bL - bo) / bo\n return bs", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def bel(self, element):\n if element.is_empty():\n return 0\n\n if self.is_empty():\n return 0\n\n if not element.is_compatible(next(iter(self.focals))):\n return 0\n \n result = 0\n for focal, value in self.items():\n if not focal.is_empty() and focal.is_subset(element):\n result += value\n return round(result, 6)", "def meanAdjustELE(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n\n Neq = np.eye(numZD,dtype=float) * 0.01\n Apart = np.zeros((numd,numZD))\n sd = np.zeros(numd)\n\n for i in range(0,numd):\n iz = np.floor(data[i,2]/zenSpacing)\n sd[i] = np.sin(data[i,2]/180.*np.pi)\n Apart[i,iz] = 1.#-(data[i,2]-iz*zenSpacing)/zenSpacing)\n\n prechi = np.dot(data[:,3].T,data[:,3])\n Neq = np.add(Neq, np.dot(Apart.T,Apart) )\n Bvec = np.dot(Apart.T,data[:,3])\n Cov = np.linalg.pinv(Neq)\n \n Sol = np.dot(Cov,Bvec)\n \n postchi = prechi - np.dot(Bvec.T,Sol)\n \n pwl = Sol\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n model = np.dot(Apart,Sol)\n f = loglikelihood(data[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n stats = {}\n stats['prechi'] = np.sqrt(prechi/numd)\n stats['postchi'] = np.sqrt(postchi/numd)\n stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)\n stats['aic'] = aic\n stats['bic'] = bic\n\n return pwl,pwlsig,stats", "def nuclear_binding_energy(\n particle: Particle, mass_numb: Optional[Integral] = None\n) -> u.J:\n return particle.binding_energy.to(u.J)", "def residue_pair_energy(self, res1, res2, pose, sf, emap):\n\t\tpose = pose\n\t\temv = EMapVector()\n\t\tsf_pair.eval_ci_2b(res1,res2,pose,emv)\n\t\tweighted_score = -1*(emv[pair]*sec_struct_weight[pose.secstruct()[res1.seqpos()-1]]*sec_struct_weight[pose.secstruct()[res2.seqpos()-1]])\n\t\temap.set(self.scoreType, weighted_score)", "def get_low_binder(RNs, AgEpitope, ntest):\n E_collect = []\n while len(E_collect) < ntest:\n ab = Ab_seq(RNs)\n Emax = E_best(ab, AgEpitope)\n if Emax >= cf.thr:\n E_collect.append(Emax)\n return min(E_collect)", "def _compute_epera(self, units='erg'):\n if 'a' not in self:\n raise ValueError('Photons must have effective area data to permit 
the computation of fluxes.')\n\n energy = _const.h * _const.c / self['w']\n energy = energy.to(units).value\n epera = energy / self['a']\n return epera", "def normalize_emission(self):\n self._e /= self._e.sum(0)", "def normEsquared(self):\n\n # Get the magnitude of E and add it to our data\n E_magsq = np.zeros_like(self.data['Ex'], dtype=np.float64)\n for comp in ('Ex', 'Ey', 'Ez'):\n E_magsq += np.absolute(self.data[comp])**2\n # E_magsq += self.data[comp].real**2\n self.extend_data('normEsquared', E_magsq)\n return E_magsq", "def calc_e_final(all_et_lst, size_of_batch):\r\n e_final_lst = []\r\n\r\n for i in range(len(all_et_lst[0])): # For each index of the Etotal list\r\n et_sum = 0 # Sum of ETotal values with same index\r\n for lst in all_et_lst: # For each Etotal list\r\n et_sum += lst[i]\r\n\r\n e_final = (1/size_of_batch) * et_sum\r\n e_final_lst.append(e_final)\r\n\r\n return e_final_lst" ]
[ "0.68311244", "0.5498993", "0.5194535", "0.51908374", "0.5127645", "0.5109397", "0.50883734", "0.50480145", "0.5023572", "0.5000986", "0.49812147", "0.4969033", "0.49680704", "0.49594802", "0.495168", "0.4943552", "0.49178055", "0.4909567", "0.48931104", "0.48774794", "0.48691463", "0.4850278", "0.48471305", "0.48394516", "0.48145178", "0.48073548", "0.48061576", "0.47988382", "0.4794204", "0.47920224" ]
0.638375
1
Prepares a naive B cell with a sequence that binds to the epitope with above-threshold affinity and returns a B cell object.
def make_naive(RNs, seq_list, AgEpitope, tnow):

    # pick a random sequence from the pregenerated pool
    ab = random.choice(seq_list)
    Emax = E_best(ab, AgEpitope)
    if tnow == 0:
        # in initialisation, distribute ages evenly over
        # lifespan
        birthtime = -np.round(RNs.getR() * cf.tlifeN)
    else:
        birthtime = tnow
    newcell = Bcell(sequence=ab, sequence0=ab, affinity=Emax, affinity0=Emax,
                    origin='naive', mutations=0,
                    family=None, birthtime=birthtime, GCentrytime=None,
                    AIDstart=None, block=False)

    return newcell
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def constructCell():\n\t\tself.weightGenerate()", "def setup_b_instance(self,norm,add_ps_mask=True):\n inst_tag = self.tag + '_'+str(self.flux_array_ebin)\n b = bsm.bayesian_scan_NPTF(tag=inst_tag,nside=self.nside,work_dir='/tmp/'+self.tag+'/',psf_dir=psf_dir,nlive=700)\n # Input the data, using the external data if provided\n if self.use_external_data:\n b.load_external_data(self.f1.CTB_en_bins,[self.external_data[self.flux_array_ebin]],self.f1.CTB_exposure_maps)\n else:\n b.load_external_data(self.f1.CTB_en_bins,self.f1.CTB_count_maps,self.f1.CTB_exposure_maps)\n\n if add_ps_mask:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False,ps_mask_array = self.f1.ps_mask_array)\n else:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False)\n\n b.add_new_template(self.f1.template_dict)\n b.rebin_external_data(1)\n\n b.add_poiss_model('ps_model','$A_{ps}$',[0.0,3.0],False)\n b.add_poiss_model('p7','$A_{p7}$',[0.0,2.0],False)\n b.add_poiss_model('bubs','$A_{bubs}$',[0.0,2.0],False)\n b.add_poiss_model('iso','$A_{iso}$',[0.0,3.0],False)\n # Add in a fixed J_map template\n b.add_fixed_templates({'J_map':[norm*self.J_map_arr[self.flux_array_ebin]/np.mean(self.J_map_arr[self.flux_array_ebin])]})\n\n b.initiate_poissonian_edep()\n return b", "def make_memory(RNs, seq_list, AgEpitope, tnow):\n ab = random.choice(seq_list)\n Emax = E_best(ab, AgEpitope)\n mutcount = np.round(RNs.getR() * 40)\n newcell = Bcell(sequence=ab, sequence0=ab, affinity=Emax, affinity0=Emax,\n origin='umem', mutations=mutcount,\n family=None, birthtime=tnow, GCentrytime=None,\n AIDstart=None, block=False)\n return newcell", "def bias_prior(self):", "def assign(self,\n bboxes,\n num_level_bboxes,\n cls_scores,\n bbox_preds,\n gt_bboxes,\n gt_bboxes_ignore=None,\n gt_labels=None):\n INF = 100000000\n bboxes = bboxes[:, :4]\n bbox_preds = bbox_preds.detach()\n cls_scores = cls_scores.detach()\n\n num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)\n\n # NOTE DeFCN style cost function\n # compute iou between all bbox and gt\n overlaps = self.iou_calculator(bbox_preds, gt_bboxes)\n # compute cls cost for bbox and GT\n cls_cost = torch.sigmoid(cls_scores[:, gt_labels])\n # make sure that we are in element-wise multiplication\n assert cls_cost.shape == overlaps.shape\n # overlaps is actually a cost matrix\n overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha\n\n # assign 0 by default\n assigned_gt_inds = overlaps.new_full((num_bboxes, ),\n 0,\n dtype=torch.long)\n\n if num_gt == 0 or num_bboxes == 0:\n # No ground truth or boxes, return empty assignment\n max_overlaps = overlaps.new_zeros((num_bboxes, ))\n if num_gt == 0:\n # No truth, assign everything to background\n assigned_gt_inds[:] = 0\n if gt_labels is None:\n assigned_labels = None\n else:\n assigned_labels = overlaps.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n return AssignResult(\n num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n # compute center distance between all bbox and gt\n gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0\n gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0\n gt_points = torch.stack((gt_cx, gt_cy), dim=1)\n\n bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0\n bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0\n bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)\n\n distances = (bboxes_points[:, None, :] -\n gt_points[None, :, :]).pow(2).sum(-1).sqrt()\n\n if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n and 
gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):\n ignore_overlaps = self.iou_calculator(\n bboxes, gt_bboxes_ignore, mode='iof')\n ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr\n distances[ignore_idxs, :] = INF\n assigned_gt_inds[ignore_idxs] = -1\n\n # Selecting candidates based on the center distance\n candidate_idxs = []\n start_idx = 0\n for level, bboxes_per_level in enumerate(num_level_bboxes):\n # on each pyramid level, for each gt,\n # select k bbox whose center are closest to the gt center\n end_idx = start_idx + bboxes_per_level\n distances_per_level = distances[start_idx:end_idx, :]\n selectable_k = min(self.topk, bboxes_per_level)\n _, topk_idxs_per_level = distances_per_level.topk(\n selectable_k, dim=0, largest=False)\n candidate_idxs.append(topk_idxs_per_level + start_idx)\n start_idx = end_idx\n candidate_idxs = torch.cat(candidate_idxs, dim=0)\n\n # get corresponding iou for the these candidates, and compute the\n # mean and std, set mean + std as the iou threshold\n candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]\n overlaps_mean_per_gt = candidate_overlaps.mean(0)\n overlaps_std_per_gt = candidate_overlaps.std(0)\n overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt\n\n is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]\n\n # limit the positive sample's center in gt\n for gt_idx in range(num_gt):\n candidate_idxs[:, gt_idx] += gt_idx * num_bboxes\n ep_bboxes_cx = bboxes_cx.view(1, -1).expand(\n num_gt, num_bboxes).contiguous().view(-1)\n ep_bboxes_cy = bboxes_cy.view(1, -1).expand(\n num_gt, num_bboxes).contiguous().view(-1)\n candidate_idxs = candidate_idxs.view(-1)\n\n # calculate the left, top, right, bottom distance between positive\n # bbox center and gt side\n l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]\n t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]\n r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)\n b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)\n is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01\n is_pos = is_pos & is_in_gts\n\n # if an anchor box is assigned to multiple gts,\n # the one with the highest IoU will be selected.\n overlaps_inf = torch.full_like(overlaps,\n -INF).t().contiguous().view(-1)\n index = candidate_idxs.view(-1)[is_pos.view(-1)]\n overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]\n overlaps_inf = overlaps_inf.view(num_gt, -1).t()\n\n max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)\n assigned_gt_inds[\n max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1\n\n if gt_labels is not None:\n assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n pos_inds = torch.nonzero(\n assigned_gt_inds > 0, as_tuple=False).squeeze()\n if pos_inds.numel() > 0:\n assigned_labels[pos_inds] = gt_labels[\n assigned_gt_inds[pos_inds] - 1]\n else:\n assigned_labels = None\n return AssignResult(\n num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)", "def prepare_bpe():\r\n bpe = spm.SentencePieceProcessor()\r\n bpe.load(os.path.join(Params.dataset_path, Params.bpe_model))\r\n return bpe", "def make_automata(n, W, b, grid = None):\n\treturn ta.ThresholdAutomaton(n, W, b, grid = grid)", "def __init__(self,\n bias=0.0,\n clipping_lower_bound=-np.inf,\n clipping_upper_bound=np.inf):\n self._bias = bias\n self._clipping_lower_bound = clipping_lower_bound\n self._clipping_upper_bound = 
clipping_upper_bound", "def build_B_block(self):\n\n N = self.N # number of MPC steps\n row_list = [] # reocrd the every row in B_hat\n \n first_block = self.B\n zero = Variable(torch.zeros(self.num_input, self.num_output*(N-1)))\n zero = self.vari_gpu(zero)\n row= torch.cat([first_block, zero],1)\n row_list.append(row)\n \n for i in range(1, N):\n first_block = self.A.mm(first_block)\n row = torch.cat([first_block, row[:,:self.num_output*(N-1)]],1)\n row_list.append(row) \n \n return torch.cat(row_list,0)", "def post_process_for_bbox(bbox_pred):\n anchors = torch.FloatTensor(\n [(1.3221, 1.73145),\n (3.19275, 4.00944),\n (5.05587, 8.09892),\n (9.47112, 4.84053),\n (11.2364, 10.0071)]\n )\n\n outsize = (13, 13)\n width, height = outsize\n \n # restore cell pos to x, y\n for w in range(width):\n for h in range(height):\n bbox_pred[:, height*h + w, :, 0] += w\n bbox_pred[:, height*h + w, :, 1] += h\n bbox_pred[:, :, :, :2] /= 13\n \n # apply anchors to w, h\n anchor_w = anchors[:, 0].contiguous().view(-1, 1)\n anchor_h = anchors[:, 1].contiguous().view(-1, 1)\n bbox_pred[:, :, :, 2:3] *= anchor_w\n bbox_pred[:, :, :, 3:4] *= anchor_h\n\n return bbox_pred", "def _generate_bboxes(self, probs, offsets, scale, threshold):\n # applying P-Net is equivalent, in some sense, to\n # moving 12x12 window with stride 2\n stride = 2\n cell_size = 12\n\n # extract positive probability and resize it as [n, m] dim tensor.\n probs = probs[:, 1, :, :]\n\n # indices of boxes where there is probably a face\n mask = probs > threshold\n inds = mask.nonzero()\n\n if inds.shape[0] == 0:\n return torch.empty(0, dtype=torch.int32, device=self.device), \\\n torch.empty(0, dtype=torch.float32, device=self.device), \\\n torch.empty(0, dtype=torch.float32, device=self.device), \\\n torch.empty(0, dtype=torch.int32, device=self.device)\n\n # transformations of bounding boxes\n tx1, ty1, tx2, ty2 = [offsets[inds[:, 0], i, inds[:, 1], inds[:, 2]]\n for i in range(4)]\n\n offsets = torch.stack([tx1, ty1, tx2, ty2], 1)\n score = probs[inds[:, 0], inds[:, 1], inds[:, 2]]\n\n # P-Net is applied to scaled images\n # so we need to rescale bounding boxes back\n bounding_boxes = torch.stack([\n stride*inds[:, -1] + 1.0,\n stride*inds[:, -2] + 1.0,\n stride*inds[:, -1] + 1.0 + cell_size,\n (stride*inds[:, -2] + 1.0 + cell_size),\n ], 0).transpose(0, 1).float()\n\n bounding_boxes = torch.round(bounding_boxes / scale).int()\n return bounding_boxes, score, offsets, inds[:, 0].int()", "def _map_B(self, obs_seq):\n B_map = np.ones((self.n_states, len(obs_seq)))\n\n for j in range(self.n_states):\n for t, obs in enumerate(obs_seq):\n for i, symbol in enumerate(obs):\n if symbol == self.MISSING or (symbol is np.nan or symbol != symbol):\n # if the symbol is missing, use the maximum likelihood symbol for that state\n temp_symbol = np.argmax(\n self.B[i][j]\n )\n B_map[j][t] *= self.B[i][j][temp_symbol]\n else:\n B_map[j][t] *= self.B[i][j][symbol]\n return B_map", "def branch_precursor(state, time, d):\n assert d[\"alpha_IL2\"] < d[\"alpha1\"] and d[\"alpha_IL2\"] < d[\"alpha2\"]\n \n th0 = state[0]\n \n th1 = state[1:(d[\"alpha1\"]+d[\"alpha1_p\"]+1)]\n th2 = state[(d[\"alpha1\"]+d[\"alpha1_p\"]+1):]\n #print(len(state), len(th1))\n ### get all cytokine secreting cells \n th1_all = np.sum(th1[-d[\"alpha1_p\"]:])\n th2_all = np.sum(th2[-d[\"alpha2_p\"]:])\n \n t_eff = th1_all+th2_all\n t_il2 = np.sum(th1[:d[\"alpha_IL2\"]]) + np.sum(th2[:d[\"alpha_IL2\"]])\n\n ### calculate cytokine concentrations\n cyto_1 = d[\"beta_cyto_1\"]*th1_all + 
d[\"ifn_ext\"]\n cyto_2 = d[\"beta_cyto_2\"]*th2_all + d[\"il21_ext\"]\n \n conc_il2 = d[\"rate_il2\"]*t_il2/(d[\"K_il2\"]+t_eff)\n\n # compute feedbacks\n fb1 = d[\"fb_rate1\"]*cyto_1**3/(cyto_1**3+d[\"K_1\"]**3)\n fb2 = d[\"fb_rate2\"]*cyto_2**3/(cyto_2**3+d[\"K_2\"]**3)\n ### update differantiation rate\n beta1 = d[\"beta1\"]*(1+fb1)\n beta2 = d[\"beta2\"]*(1+fb2) \n \n ### calculate probability, note that these are adjusted to beta1 beta2 so that\n # they are not necessarily \\in (0,1)\n p1, p2 = get_prob(d, beta1, beta2, cyto_1, cyto_2)\n \n #print(beta1*p1_adj/(beta1*p1_adj+beta2))\n beta1_p = d[\"beta1_p\"]\n beta2_p = d[\"beta2_p\"]\n rate_death = d[\"d_eff\"] \n \n # check for homeostasis regulation\n if d[\"crit\"] == False:\n update_t0(d, time, conc_il2, t_eff)\n elif d[\"death_mode\"] == False:\n assert d[\"crit\"] == True \n beta1_p = beta1_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n beta2_p = beta2_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n\n else:\n rate_death = rate_death*np.exp(time-d[\"t0\"])\n\n # this is the actual differentiation where odes are computed \n dt_th1 = diff_precursor(th1, th0, d[\"alpha1\"], beta1, beta1_p, p1, rate_death, d)\n dt_th2 = diff_precursor(th2, th0, d[\"alpha2\"], beta2, beta2_p, p2, rate_death, d)\n dt_th0 = -(beta1*p1+beta2)*th0 \n dt_state = np.concatenate(([dt_th0], dt_th1, dt_th2))\n\n return dt_state", "def cell_to_blockB(self, cell):\r\n self.blockB += 1\r\n self.blockA -= 1\r\n if cell.locked is True:\r\n self.blockB_locked += 1\r\n self.blockA_locked -= 1\r\n else:\r\n self.blockB_free += 1\r\n self.blockA_free -= 1\r\n self.blockA_cells.remove(cell)\r\n self.blockB_cells.append(cell)\r\n self.__update_cut_state()\r\n assert self.blockA >= 0\r\n assert self.blockA_free >= 0\r\n assert self.blockB >= 0\r\n assert self.blockB_free >= 0\r\n assert self.blockA_free + self.blockA_locked == self.blockA\r\n assert self.blockB_free + self.blockB_locked == self.blockB", "def convert_brelu(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n t_max = op.attr(\"t_max\")\n t_min = op.attr(\"t_min\")\n out = _op.tensor.clip(x, t_min, t_max)\n g.add_node(op.output(\"Out\")[0], out)", "def init_bbox_head(self, mask_roi_extractor, mask_head):\n pass", "def scalarmult_B(e: int) -> Point:\n # scalarmult(B, l) is the identity\n e = e % l\n P = ident\n for i in range(253):\n if e & 1:\n P = edwards_add(P, Bpow[i])\n e = e // 2\n assert e == 0, e\n return P", "def BB ( self ) :\n return self.__bb", "def BB ( self ) :\n return self.__bb", "def prior_sample(self, bn):\n x = np.zeros(3)\n\n # first joint prob\n random_choice = np.random.choice(bn[0], 1, bn[0].all(), bn[0])\n x[0] = random_choice[0]\n\n # Second Joint Prob\n if x[0] == 0.1:\n random_choice = np.random.choice(bn[1][0], 1, bn[1][0].all(), bn[1][0])\n x[1] = random_choice\n elif x[0] == 0.9:\n random_choice = np.random.choice(bn[1][1], 1, bn[1][1].all(), bn[1][1])\n x[1] = random_choice\n\n # Third Joint Prob\n if random_choice[0] == 0.8 or random_choice == 0.1:\n random_choice = np.random.choice(bn[2][0], 1, bn[2][0].all(), bn[2][0])\n x[2] = random_choice\n else:\n random_choice = np.random.choice(bn[2][1], 1, bn[2][1].all(), bn[2][1])\n x[2] = random_choice\n return x", "def process_pain(x, lb, ub):\n x = x.abs()\n x.loc[(x > ub)] = 8\n x.loc[(x < lb) | (x > ub)] = np.nan\n return x", "def __init__(\n self,\n num_units,\n memory,\n memory_sequence_length = None,\n normalize = False,\n probability_fn = None,\n score_mask_value = float('-inf'),\n name = 
'PointerGeneratorBahdanauAttention',\n coverage = False,\n ):\n super(PointerGeneratorBahdanauAttention, self).__init__(\n num_units = num_units,\n memory = memory,\n memory_sequence_length = memory_sequence_length,\n normalize = normalize,\n probability_fn = probability_fn,\n score_mask_value = score_mask_value,\n name = name,\n )\n self.coverage = coverage", "def cbam_block(cbam_feature, ratio=8):\n\n\tcbam_feature = channel_attention(cbam_feature, ratio)\n\tcbam_feature = spatial_attention(cbam_feature)\n\treturn cbam_feature", "def Bagging(df_train, df_test, B, threshold):\n\n # number of individual forecasts and number of periods\n K = df_test.shape[1]\n T = df_train.shape[0]\n\n # matrix for saving predictions from bootstrapped models\n pred_mat = np.full((df_test.shape[0], B), np.nan, dtype=float)\n\n # length of the boostrap block\n m = int(T**(1/3))\n p = np.int(T/m)\n\n # pairwise bootstrap\n # for i in range(B):\n #\n # # create the bootstrap sample from randomly drawn blocks\n # boot_indices = np.random.randint(T, size=T)\n # boot_X = df_train.iloc[boot_indices, 1:]\n # boot_y = df_train.iloc[boot_indices, 0]\n #\n # # pre-test\n # # estimate OLS on the block with all individual forecasts\n # boot_X_t = np.transpose(boot_X)\n # XX = np.linalg.inv(np.dot(boot_X_t, boot_X))\n # beta_hat = np.linalg.multi_dot([XX, boot_X_t, boot_y])\n #\n # # residuals\n # epsilon = boot_y - np.dot(boot_X, beta_hat)\n # epsilon_sq = epsilon**2\n #\n # # variances (heteroskedasticity robust)\n # Sigma = np.diag(epsilon_sq)\n # beta_var = np.diag(\n # np.linalg.multi_dot([XX, boot_X_t, Sigma, boot_X, XX])\n # )\n\n # moving block bootstrap (in the first dimension of the array)\n # divide the sample into blocks of length m\n blocks = np.asarray(np.split(df_train.values[:p*m, :], p, axis=0))\n\n # create bootstrap samples by sampling blocks\n boot_ind = np.random.randint(p, size=(B, p))\n boot_sample = np.reshape(blocks[boot_ind, :, :], (B, p*m, K+1))\n boot_X = boot_sample[:, :, 1:]\n boot_y = boot_sample[:, :, 0][:, :, np.newaxis]\n\n # pre-test\n # estimate OLS on the block filled sample with all individual forecasts\n boot_X_t = np.transpose(boot_X, axes=(0, 2, 1))\n beta_hat = np.matmul(\n np.linalg.inv(np.matmul(boot_X_t, boot_X)),\n np.matmul(boot_X_t, boot_y)\n )\n\n # residuals\n epsilon = boot_y - np.matmul(boot_X, beta_hat)\n\n # compute the absolute t-statistics\n # compute S\n S_sum = np.full((B, K, K), 0, dtype=float)\n for e in range(p):\n for f in range(m):\n for g in range(m):\n\n F_f = boot_X[:, e*m + f, :][:, :, np.newaxis]\n F_g = boot_X[:, e*m + g, :][:, :, np.newaxis]\n eps_f = epsilon[:, e*m + f][:, np.newaxis]\n eps_g = epsilon[:, e*m + g][:, np.newaxis]\n S_sum += np.matmul(\n np.multiply(F_f, eps_f),\n np.transpose(np.multiply(F_g, eps_g), axes=(0, 2, 1))\n )\n S = S_sum / (p*m)\n # compute H\n H_sum = np.full((B, K, K), 0, dtype=float)\n for e in range(p):\n for f in range(m):\n\n F_f = boot_X[:, e*m + f, :][:, :, np.newaxis]\n H_sum += np.matmul(F_f, np.transpose(F_f, axes=(0, 2, 1)))\n\n H = H_sum / (p*m)\n H_inv = np.linalg.inv(H)\n\n # variances\n beta_var = np.diagonal(\n 1/np.sqrt(T) * np.matmul(np.matmul(H_inv, S), H_inv),\n axis1=1, axis2=2\n )[:, :, np.newaxis]\n\n # near singularity may cause negative variance issues\n beta_var = np.abs(beta_var)\n # t-statistics\n t_stats_abs = np.abs(np.divide(beta_hat, np.sqrt(beta_var)))\n\n for i in range(B):\n\n sel_ind = np.squeeze(t_stats_abs[i] > threshold)\n # continue if there is atleas one predictor\n if np.sum(sel_ind) 
> 0:\n\n boot_X_sel_t = boot_X[i, :, sel_ind]\n boot_X_sel = np.transpose(boot_X_sel_t)\n boot_y_sel = boot_y[i]\n\n # estimate OLS on the block with the selected forecasts\n gamma_hat = np.linalg.multi_dot(\n [np.linalg.inv(np.dot(boot_X_sel_t, boot_X_sel)),\n boot_X_sel_t, boot_y_sel])\n\n # forecast out-of-sample\n pred_mat[:, i] = np.dot(\n df_test.iloc[:, sel_ind].values,\n gamma_hat\n ).flatten()\n else:\n # if no variable passes the pre-test, the prediction is 0\n pred_mat[:, i] = 0\n\n # aggregation of forecasts\n pred = np.nanmean(pred_mat, axis=1)\n\n df_pred = pd.DataFrame(\n {\"Bagging\": pred},\n index=df_test.index\n )\n\n return df_pred", "def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):\n assign_on_cpu = True if (self.gpu_assign_thr > 0) and (\n gt_bboxes.shape[0] > self.gpu_assign_thr) else False\n # compute overlap and assign gt on CPU when number of GT is large\n if assign_on_cpu:\n device = bboxes.device\n bboxes = bboxes.cpu()\n gt_bboxes = gt_bboxes.cpu()\n if gt_bboxes_ignore is not None:\n gt_bboxes_ignore = gt_bboxes_ignore.cpu()\n if gt_labels is not None:\n gt_labels = gt_labels.cpu()\n\n '''\n overlaps = self.iou_calculator(gt_bboxes, bboxes, mode=self.assign_metric)\n bboxes2 = self.anchor_rescale(bboxes, self.ratio)\n overlaps2 = self.iou_calculator(gt_bboxes, bboxes2, mode=self.assign_metric)\n '''\n\n overlaps = self.iou_calculator(gt_bboxes, bboxes, mode=self.assign_metric)\n bboxes2 = self.anchor_rescale(bboxes, self.ratio)\n overlaps2 = self.iou_calculator(gt_bboxes, bboxes2, mode=self.assign_metric)\n\n\n\n if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):\n if self.ignore_wrt_candidates:\n ignore_overlaps = self.iou_calculator(\n bboxes, gt_bboxes_ignore, mode='iof')\n ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n else:\n ignore_overlaps = self.iou_calculator(\n gt_bboxes_ignore, bboxes, mode='iof')\n ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)\n overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1\n\n k1 = self.topk[0]\n k2 = self.topk[1] \n #k3 = self.topk[2]\n\n ## two_step anchor assigning\n assigned_gt_inds = self.assign_wrt_ranking(overlaps, k1, gt_labels)\n #assign_result = self.reassign_wrt_ranking_v5(assigned_gt_inds, overlaps2, k2, gt_labels)\n assign_result = self.reassign_wrt_ranking(assigned_gt_inds, overlaps2, k2, gt_labels)\n\n ## filter out low quality candidates\n if self.inside==True:\n num_anchors = bboxes.size(0)\n num_gts = gt_bboxes.size(0)\n\n anchor_cx = (bboxes[...,0]+bboxes[...,2])/2\n anchor_cy = (bboxes[...,1]+bboxes[...,3])/2\n ext_gt_bboxes = gt_bboxes[:,None,:].expand(num_gts, num_anchors, 4)\n left = anchor_cx - ext_gt_bboxes[...,0]\n right = ext_gt_bboxes[..., 2] - anchor_cx\n top = anchor_cy - ext_gt_bboxes[..., 1]\n bottom = ext_gt_bboxes[..., 3] - anchor_cy\n\n bbox_targets = torch.stack((left, top, right, bottom), -1)\n inside_flag = bbox_targets.min(-1)[0] > 0\n length = range(assign_result.gt_inds.size(0))\n inside_mask = inside_flag[(assign_result.gt_inds-1).clamp(min=0), length]\n assign_result.gt_inds *= inside_mask\n\n '''\n center_distance = self.iou_calculator(gt_bboxes, bboxes, mode='center_distance2')\n # calculate the long side of gt_box\n\n gt_width = gt_bboxes[..., 2] - gt_bboxes[..., 0]\n gt_height = gt_bboxes[..., 3] - gt_bboxes[..., 1]\n radius = 1.5\n gt_circle = (gt_width*0.5*radius) ** 2 + (gt_height*0.5*radius) ** 2\n inside_flag = center_distance <= gt_circle[...,None]\n length 
= range(assign_result.gt_inds.size(0))\n inside_mask = inside_flag[(assign_result.gt_inds-1).clamp(min=0), length]\n assign_result.gt_inds *= inside_mask\n \n \n device1 = gt_bboxes.device\n eps = 1e-6\n cxy_g = (gt_bboxes[..., None, :, :2] + gt_bboxes[..., None, :, 2:]) / 2\n cxy_a = (bboxes[..., :, None, :2] + bboxes[..., :, None, 2:]) / 2\n\n cxy_g = cxy_g.unsqueeze(-1)\n cxy_a = cxy_a.unsqueeze(-1)\n\n wg = gt_bboxes[..., :, None, 2] - gt_bboxes[..., :, None, 0] + eps\n hg = gt_bboxes[..., :, None, 3] - gt_bboxes[..., :, None, 1] + eps\n\n inv_sigma = torch.stack((4/(wg**2), torch.zeros_like(wg),\n torch.zeros_like(hg), 4/(hg**2))).reshape(-1,2,2)\n gaussian = torch.exp(-0.5*(cxy_a-cxy_g).permute(0, 1, 3, 2).matmul(inv_sigma).matmul(cxy_a-cxy_g)).squeeze(-1).squeeze(-1)\n inside_flag = gaussian >= torch.exp(torch.tensor([-2.0])).to(device1)\n length = range(assign_result.gt_inds.size(0))\n inside_mask = inside_flag[length, (assign_result.gt_inds-1).clamp(min=0)]\n assign_result.gt_inds *= inside_mask\n '''\n \n \n if assign_on_cpu:\n assign_result.gt_inds = assign_result.gt_inds.to(device)\n assign_result.max_overlaps = assign_result.max_overlaps.to(device)\n if assign_result.labels is not None:\n assign_result.labels = assign_result.labels.to(device)\n return assign_result", "def _generate_prior(self, text_lengths, feats_lengths,\n w=1) -> paddle.Tensor:\n B = len(text_lengths)\n T_text = text_lengths.max()\n T_feats = feats_lengths.max()\n\n bb_prior = paddle.full((B, T_feats, T_text), fill_value=-np.inf)\n for bidx in range(B):\n T = feats_lengths[bidx].item()\n N = text_lengths[bidx].item()\n\n key = str(T) + ',' + str(N)\n if self.cache_prior and key in self._cache:\n prob = self._cache[key]\n else:\n alpha = w * np.arange(1, T + 1, dtype=float) # (T,)\n beta = w * np.array([T - t + 1 for t in alpha])\n k = np.arange(N)\n batched_k = k[..., None] # (N,1)\n prob = betabinom.pmf(batched_k, N, alpha, beta) # (N,T)\n\n # store cache\n if self.cache_prior and key not in self._cache:\n self._cache[key] = prob\n\n prob = paddle.to_tensor(\n prob, place=text_lengths.place, dtype=\"float32\").transpose(\n (1, 0)) # -> (T,N)\n bb_prior[bidx, :T, :N] = prob\n\n return bb_prior", "def bow_tie(metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:\n row = np.array([0, 0, 0, 0, 1, 3])\n col = np.array([1, 2, 3, 4, 2, 4])\n adjacency = sparse.csr_matrix((np.ones(len(row), dtype=int), (row, col)), shape=(5, 5))\n adjacency = (adjacency + adjacency.T).astype(bool)\n\n if metadata:\n x = np.array([0, -1, 1, -1, 1])\n y = np.array([0, 1, 1, -1, -1])\n graph = Bunch()\n graph.adjacency = adjacency\n graph.position = np.vstack((x, y)).T\n graph.name = 'bow_tie'\n return graph\n else:\n return adjacency", "def DefinePPBinConstraint( self, placement, officeData, persoData ) :\n\n officeFilter = pd.pivot_table(officeData.loc[:,self.roomTag], columns=self.roomTag, index=officeData.index, aggfunc=len).fillna(0)\n officeFilter = np.dot(placement, officeFilter)\n \n \n self.wish = np.array([np.dot( persoData[self.weightLabel].values.T, officeFilter )])\n \n if self.removeSelf : \n Pp = np.diag( persoData.loc[:, self.inLabel]*persoData.loc[:, self.weightLabel])\n self.wish -= np.dot( Pp, officeFilter).sum(0)\n \n self.dispo = persoData[self.inLabel]\n self.dispo = np.array([np.dot( self.dispo.T, officeFilter )])", "def cbam_block(cbam_feature, ratio=8):\n\n cbam_feature = channel_attention(cbam_feature, ratio)\n cbam_feature = spatial_attention(cbam_feature)\n return cbam_feature", "def 
get_noisy_init_from_bb(reference_shape, bb, noise_percentage=.02):\n bb = PointCloud(bb)\n reference_shape = PointCloud(reference_shape)\n\n bb = noisy_shape_from_bounding_box(\n reference_shape,\n bb,\n noise_percentage=[noise_percentage, 0, noise_percentage]).bounding_box(\n )\n\n return align_shape_with_bounding_box(reference_shape, bb).points" ]
[ "0.5430549", "0.54229766", "0.53464353", "0.52291906", "0.5195737", "0.5010717", "0.49687093", "0.48980033", "0.48972934", "0.48607045", "0.4837311", "0.48047954", "0.47999904", "0.47738087", "0.4773558", "0.47519714", "0.47448894", "0.47322154", "0.47322154", "0.47309393", "0.47130743", "0.47025207", "0.46901023", "0.46896374", "0.46796352", "0.46629345", "0.46550307", "0.46351635", "0.4621441", "0.4618775" ]
0.61007017
0
Prepares an unspecific memory B cell with a sequence that binds to the epitope with above-threshold affinity and returns a B cell object with a random number of mutations from its past (0-40).
def make_memory(RNs, seq_list, AgEpitope, tnow):
    ab = random.choice(seq_list)
    Emax = E_best(ab, AgEpitope)
    mutcount = np.round(RNs.getR() * 40)
    newcell = Bcell(sequence=ab, sequence0=ab, affinity=Emax, affinity0=Emax,
                    origin='umem', mutations=mutcount, family=None,
                    birthtime=tnow, GCentrytime=None, AIDstart=None,
                    block=False)
    return newcell
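For illustration only, a minimal stand-alone sketch of the same sampling step: pick a sequence from a pool that was pre-screened for above-threshold binding and assign it a uniformly distributed past-mutation count in [0, 40]. Every name below is a stand-in; Bcell, E_best, cf and the RNs random-number wrapper are defined elsewhere in the simulation code and are not reproduced here.

import numpy as np

rng = np.random.default_rng(0)

def toy_memory_cell(seq_pool, affinity_of):
    # pick one sequence from a pool assumed to be pre-screened for binding
    seq = seq_pool[rng.integers(len(seq_pool))]
    affinity = affinity_of(seq)
    # uniformly distributed number of past mutations in [0, 40]
    mutations = int(np.round(rng.random() * 40))
    return {"seq": seq, "affinity": affinity,
            "mutations": mutations, "origin": "umem"}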
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_naive(RNs, seq_list, AgEpitope, tnow):\n # pick a random sequence from the pregenerated pool\n ab = random.choice(seq_list)\n Emax = E_best(ab, AgEpitope)\n if tnow == 0: # in initialisation, distribute ages evenly over\n # lifespan\n birthtime = -np.round(RNs.getR() * cf.tlifeN)\n else:\n birthtime = tnow\n newcell = Bcell(sequence=ab, sequence0=ab, affinity=Emax, affinity0=Emax,\n origin='naive', mutations=0,\n family=None, birthtime=birthtime,\n GCentrytime=None,\n AIDstart=None, block=False)\n return newcell", "def get_B100():\n m = 100\n random.seed(1111*m)\n A = random.randn(m, m) + 1j*random.randn(m, m)\n A[np.tril_indices(m, -2)] = 0\n return A", "def constructCell():\n\t\tself.weightGenerate()", "def __init__(\n self,\n num_units,\n memory,\n memory_sequence_length = None,\n normalize = False,\n probability_fn = None,\n score_mask_value = float('-inf'),\n name = 'PointerGeneratorBahdanauAttention',\n coverage = False,\n ):\n super(PointerGeneratorBahdanauAttention, self).__init__(\n num_units = num_units,\n memory = memory,\n memory_sequence_length = memory_sequence_length,\n normalize = normalize,\n probability_fn = probability_fn,\n score_mask_value = score_mask_value,\n name = name,\n )\n self.coverage = coverage", "def rand(n, W, b, proportion = 0.5):\n\n\treturn ta.ThresholdAutomaton(n, W, b, random_grid = True, proportion = proportion)", "def distribute_waterbag(self):\n # Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n while ptclsMade < self.npart:\n ranU = 0.0\n while ranU <= 0:\n ranU = random.random()\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n trialH = np.sqrt(ranU)\n newH = self.emit*trialH\n y0 = np.sqrt(newH)\n #self.emittance = newH\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n #xMax = yMax\n\n trialValue = 1e10\n while trialValue >= newH:\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n\n initialValue = trialValue\n if initialValue < newH:\n pMag = np.sqrt(2*(newH - initialValue))\n pDir = 2*np.pi* random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n else:\n print(\"Initial value generated exceeds limiting H. 
Sampling new value.\")\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def warmUpExercise():\n\n return np.identity(5)", "def assign_rand_cell(self, init=False):\n if len(self.emptiesSet):\n cell = rnd.sample(self.emptiesSet, 1)[0]\n if init:\n self.set_cell(cell, 2)\n else:\n cdf = rnd.random()\n if cdf > 0.75:\n self.set_cell(cell, 4)\n self.new_val = 4\n else:\n self.set_cell(cell, 2)\n self.new_val = 2\n self.new_cell = cell\n self.emptiesSet.remove(cell)", "def generate_random(self, prob_alive=0.3):\n self.generation = 0\n for i in range(self.lines):\n for j in range(self.cols):\n if random.random() < prob_alive:\n self[i][j] = self.cell_state['alive']", "def create_fixed_generator(anchor_boxes, valid_indices,\n lower_threshold, upper_threshold,\n ratio=1., metric='iou', minibatch_size=256, seed=42):\n assert minibatch_size <= len(valid_indices), 'Minibatch length must be greater than valid regions number'\n assert metric in _metrics.keys(), 'Only available metrics are \\'iou\\', \\'positive_overlap\\' and \\'overlap\\''\n valid_ab = anchor_boxes[valid_indices]\n compute_metric = _metrics[metric](valid_ab)\n neg_samples = floor(minibatch_size / (1 + ratio))\n pos_samples = ceil(neg_samples * ratio)\n targets_shape = (len(anchor_boxes), 5)\n random_generator = np.random.default_rng(seed=seed)\n\n def targets_generator(gt_boxes):\n metrics, gt_boxes = compute_metric(gt_boxes)\n neg_ind = np.flatnonzero(metrics < lower_threshold)\n pos_ind = np.flatnonzero(metrics > upper_threshold)\n\n if len(neg_ind) > neg_samples:\n neg_ind = random_generator.choice(neg_ind, neg_samples, replace=False)\n elif len(neg_ind) < neg_samples:\n neg_ind = np.argpartition(metrics, neg_samples)[:neg_samples]\n if len(pos_ind) > pos_samples:\n pos_ind = random_generator.choice(pos_ind, pos_samples, replace=False)\n elif len(pos_ind) < pos_samples:\n pos_ind = np.argpartition(metrics, len(metrics) - pos_samples)[-pos_samples:]\n labels = np.full_like(metrics, -1, dtype='int')\n labels[pos_ind] = 1\n labels[neg_ind] = 0\n\n deltas = np.full_like(gt_boxes, 0, dtype='float')\n deltas[pos_ind] = compute_deltas(valid_ab[pos_ind], gt_boxes[pos_ind])\n\n targets = np.zeros(targets_shape, dtype='float')\n targets[:, 0] = -1\n targets[valid_indices] = np.hstack([labels[:, np.newaxis], deltas])\n # Since there is no way to give a loss function two tensors,\n # we have to make one, containing all required labels\n return targets\n return targets_generator", "def create_minibatch(self):\r\n if self.experience_batch.shape[0] <= self.minibatch_size:\r\n self.minibatch = self.experience_batch\r\n\r\n else:\r\n ind = np.random.randint(self.experience_batch.shape[0], size=self.minibatch_size) # same sample can be in the minibatch multiple times --> problem for algorithm ?\r\n self.minibatch = self.experience_batch[ind]", "def jump(self):\n global jumpSize\n print \"jumping...\"\n # create a range that includes all the available feature indices\n featureIndices = range(0, len(self.features))\n # remove indices until there are only jumpSize left\n while len(featureIndices) > jumpSize:\n # choose a random index\n index = random.randint(0, len(featureIndices)-1)\n # remove that item from the list of indices\n del featureIndices[index]\n for featureIndex in featureIndices:\n # get a pointer to that feature\n feature = self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current 
feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]", "def propose(x, jump = 0.1):\n\treturn (x[0] + random.gauss(0, jump), x[1] + random.gauss(0, jump))", "def polynomial_bounded(ind, cmap, eta: float, low: float, up: float, mut_pb: float):\n mut_heights = copy(ind.heights)\n for i in range(len(ind.heights)):\n if random() < mut_pb:\n x = ind.heights[i].astype(float)\n if(x<low):\n x=low\n if(x>up):\n x=up\n delta_1 = (x - low) / (up - low)\n delta_2 = (up - x) / (up - low)\n rand = random()\n mut_pow = 1. / (eta + 1.)\n\n if rand < 0.5:\n xy = 1. - delta_1\n val = 2. * rand + (1. - 2. * rand) * xy**(eta + 1.)\n delta_q = val**mut_pow - 1.\n else:\n xy = 1. - delta_2\n val = 2. * (1. - rand) + 2. * (rand - 0.5) * xy**(eta + 1.)\n delta_q = 1. - val**mut_pow\n\n x += delta_q * (up - low)\n x = min(max(x, low), up)\n if(math.isnan(x)):\n x = randrange(low, up)\n mut_heights[i] = x\n\n mut_colors = np.array([height_to_color(cmap, height) for height in mut_heights]).astype(int)\n offspring = OffspringGrid(ind.polygons, mut_colors, mut_heights, ind.grid_ids, ind.status, ind.building_ids,\n ind.added, ind.dropped)\n return offspring", "def __init__(\n self,\n node_size_x,\n node_size_y,\n bin_center_x,\n bin_center_y,\n target_density,\n xl,\n yl,\n xh,\n yh,\n bin_size_x,\n bin_size_y,\n num_movable_nodes,\n num_terminals,\n num_filler_nodes,\n padding,\n deterministic_flag, # control whether to use deterministic routine\n sorted_node_map,\n movable_macro_mask=None,\n fast_mode=False,\n region_id=None,\n fence_regions=None, # [n_subregion, 4] as dummy macros added to initial density. 
(xl,yl,xh,yh) rectangles\n node2fence_region_map=None,\n placedb=None\n ):\n\n if(region_id is not None):\n ### reconstruct data structure\n num_nodes = placedb.num_nodes\n if(region_id < len(placedb.regions)):\n self.fence_region_mask = node2fence_region_map[:num_movable_nodes] == region_id\n else:\n self.fence_region_mask = node2fence_region_map[:num_movable_nodes] >= len(placedb.regions)\n\n node_size_x = torch.cat([node_size_x[:num_movable_nodes][self.fence_region_mask],\n node_size_x[num_movable_nodes:num_nodes-num_filler_nodes],\n node_size_x[num_nodes-num_filler_nodes+placedb.filler_start_map[region_id]:num_nodes-num_filler_nodes+placedb.filler_start_map[region_id+1]]], 0)\n node_size_y = torch.cat([node_size_y[:num_movable_nodes][self.fence_region_mask],\n node_size_y[num_movable_nodes:num_nodes-num_filler_nodes],\n node_size_y[num_nodes-num_filler_nodes+placedb.filler_start_map[region_id]:num_nodes-num_filler_nodes+placedb.filler_start_map[region_id+1]]], 0)\n\n num_movable_nodes = (self.fence_region_mask).long().sum().item()\n num_filler_nodes = placedb.filler_start_map[region_id+1]-placedb.filler_start_map[region_id]\n if(movable_macro_mask is not None):\n movable_macro_mask = movable_macro_mask[self.fence_region_mask]\n ## sorted cell is recomputed\n sorted_node_map = torch.sort(node_size_x[:num_movable_nodes])[1].to(torch.int32)\n ## make pos mask for fast forward\n self.pos_mask = torch.zeros(2, placedb.num_nodes, dtype=torch.bool, device=node_size_x.device)\n self.pos_mask[0,:placedb.num_movable_nodes].masked_fill_(self.fence_region_mask, 1)\n self.pos_mask[1,:placedb.num_movable_nodes].masked_fill_(self.fence_region_mask, 1)\n self.pos_mask[:,placedb.num_movable_nodes:placedb.num_nodes-placedb.num_filler_nodes] = 1\n self.pos_mask[:,placedb.num_nodes-placedb.num_filler_nodes+placedb.filler_start_map[region_id]:placedb.num_nodes-placedb.num_filler_nodes+placedb.filler_start_map[region_id+1]] = 1\n self.pos_mask = self.pos_mask.view(-1)\n\n super(ElectricPotential,\n self).__init__(node_size_x=node_size_x,\n node_size_y=node_size_y,\n bin_center_x=bin_center_x,\n bin_center_y=bin_center_y,\n target_density=target_density,\n xl=xl,\n yl=yl,\n xh=xh,\n yh=yh,\n bin_size_x=bin_size_x,\n bin_size_y=bin_size_y,\n num_movable_nodes=num_movable_nodes,\n num_terminals=num_terminals,\n num_filler_nodes=num_filler_nodes,\n padding=padding,\n deterministic_flag=deterministic_flag,\n sorted_node_map=sorted_node_map,\n movable_macro_mask=movable_macro_mask)\n self.fast_mode = fast_mode\n self.fence_regions = fence_regions\n self.node2fence_region_map = node2fence_region_map\n self.placedb = placedb\n self.target_density = target_density\n self.region_id = region_id\n ## set by build_density_op func\n self.filler_start_map = None\n self.filler_beg = None\n self.filler_end = None", "def _populate_exp(self):\n old_s = self._current_ob\n comb_mask = self._comb_mask\n if not self._comb_mask and self._fine_mask is not None:\n fine_mask = self._fine_mask if self._fine_mask.shape[0] == max(self.num_actions[0], self.num_actions[1]) \\\n else np.pad(self._fine_mask, (0, max(self.num_actions[0], self.num_actions[1]) - self._fine_mask.shape[0]), 'constant', constant_values=(0, 0))\n else:\n fine_mask = np.ones([max(self.num_actions[0], self.num_actions[1])], dtype=np.bool)\n last_cards_value = self.player.get_last_outcards()\n if self.rng.rand() <= self.exploration:\n if not self._comb_mask and self._fine_mask is not None:\n q_values = np.random.rand(self.num_actions[1])\n 
q_values[np.where(np.logical_not(self._fine_mask))[0]] = np.nan\n act = np.nanargmax(q_values)\n # print(q_values)\n # print(act)\n else:\n act = self.rng.choice(range(self.num_actions[0 if comb_mask else 1]))\n else:\n q_values = self.predictor(old_s[None, :, :, :], np.array([comb_mask]), np.array([fine_mask]))[0][0]\n if not self._comb_mask and self._fine_mask is not None:\n q_values = q_values[:self.num_actions[1]]\n assert np.all(q_values[np.where(np.logical_not(self._fine_mask))[0]] < -100)\n q_values[np.where(np.logical_not(self._fine_mask))[0]] = np.nan\n act = np.nanargmax(q_values)\n assert act < self.num_actions[0 if comb_mask else 1]\n # print(q_values)\n # print(act)\n # clamp action to valid range\n act = min(act, self.num_actions[0 if comb_mask else 1] - 1)\n if comb_mask:\n reward = 0\n isOver = False\n else:\n if last_cards_value.size > 0:\n if act > 0:\n if not CardGroup.to_cardgroup(self._action_space[act]).bigger_than(CardGroup.to_cardgroup(to_char(last_cards_value))):\n print('warning, some error happened')\n # print(to_char(self.player.get_curr_handcards()))\n reward, isOver, _ = self.player.step_manual(to_value(self._action_space[act]))\n\n # print(self._action_space[act])\n\n # step for AI\n while not isOver and self.player.get_role_ID() != ROLE_ID_TO_TRAIN:\n _, reward, _ = self.player.step_auto()\n isOver = (reward != 0)\n # if landlord negate the reward\n if ROLE_ID_TO_TRAIN == 2:\n reward = -reward\n self._current_game_score.feed(reward)\n\n if isOver:\n # print('lord wins' if reward > 0 else 'farmer wins')\n self._player_scores.feed(self._current_game_score.sum)\n # print(self._current_game_score.sum)\n while True:\n self.player.reset()\n # init_cards = np.arange(36)\n # self.player.prepare_manual(init_cards)\n self.player.prepare()\n self._comb_mask = True\n early_stop = False\n while self.player.get_role_ID() != ROLE_ID_TO_TRAIN:\n _, reward, _ = self.player.step_auto()\n isOver = (reward != 0)\n if isOver:\n print('prestart ends too early! 
now resetting env')\n early_stop = True\n break\n if early_stop:\n continue\n self._current_ob, self._action_space = self.get_state_and_action_spaces()\n break\n self._current_game_score.reset()\n else:\n self._comb_mask = not self._comb_mask\n self._current_ob, self._action_space = self.get_state_and_action_spaces(act if not self._comb_mask else None)\n self.mem.append(Experience(old_s, act, reward, isOver, comb_mask, fine_mask))", "def _allocate(self, n_resource, beliefs):\n # With probability epsilon allocate with uniform probability.\n # With probability 1-epsilon, allocate according to belief.\n if self.rng.binomial(1, self.params.epsilon):\n self.last_allocation = self.sample_from(self.action_space)\n else:\n optimal_allocation = None\n max_expected_yield = 0\n\n # Construct entire fi table, and corresponding min and max fi tables.\n # The fi table is a table of the expected probability that a incident\n # in a bin is discovered by an attention unit, for each bin and each\n # possible allocation amount for that bin.\n fi_table = self._construct_approx_fi_table(self._n_bins, beliefs,\n self._n_resource + 1)\n min_fi_table = np.maximum(fi_table - self.params.alpha, 0)\n max_fi_table = min_fi_table + self.params.alpha\n\n # For every bin.\n for bin_i in range(self._n_bins):\n current_allocation = np.zeros(\n self._n_bins, dtype=self.action_space.dtype)\n alloc_upperbound = np.zeros(self._n_bins, dtype=self.action_space.dtype)\n\n # Get all upper and lower bounds with bin_i as starting bin.\n rows = np.array([i for i in range(self._n_bins) if i != bin_i])\n broadcast_shape = (self._n_resource + 1, len(rows),\n self._n_resource + 1)\n lower_bounds = np.argmax(\n (np.broadcast_to(fi_table[rows, :], broadcast_shape).T >=\n min_fi_table[bin_i]).T,\n axis=2)\n upper_bounds = np.argmin(\n (np.broadcast_to(fi_table[rows, :], broadcast_shape).T <=\n max_fi_table[bin_i]).T,\n axis=2) - 1\n upper_bounds[upper_bounds == -1] = self._n_resource\n\n # For every possible allocation to that bin.\n for alloc_to_i in range(self._n_resource + 1):\n current_allocation = np.zeros(\n self._n_bins, dtype=self.action_space.dtype)\n current_allocation[bin_i] = alloc_to_i\n alloc_upperbound[rows] = upper_bounds[alloc_to_i]\n # Set current allocation values to lower bounds.\n current_allocation[rows] = lower_bounds[alloc_to_i]\n alloc_upperbound[bin_i] = alloc_to_i\n\n if np.sum(current_allocation) > self._n_resource or np.any(\n current_allocation > alloc_upperbound):\n # This allocation scheme requires more resource than available.\n # Move on to next possible allocation scheme.\n continue\n remaining_resource = self._n_resource - np.sum(current_allocation)\n\n # Now greedily allocate remaining resources to bins that have maximal\n # marginal probability of making another discovery.\n for _ in range(remaining_resource):\n marginal_probs = []\n for j in range(self._n_bins):\n if current_allocation[j] < alloc_upperbound[j]:\n marginal_probs.append(\n ((self._calculate_tail_probability(\n current_allocation[j] + 1, beliefs[j]) -\n self._calculate_tail_probability(current_allocation[j],\n beliefs[j])), j))\n if not marginal_probs:\n # Allocation cannot make full use of resources and satisfy\n # fairness constraint go to next allocation.\n break\n next_bin = max(marginal_probs, key=lambda i: i[0])[1]\n current_allocation[next_bin] += 1\n if np.sum(current_allocation) < self._n_resource or np.any(\n current_allocation > alloc_upperbound):\n # This allocation scheme requires more resource than available\n # or doesn't 
make full use of resources.\n # Move on to next possible allocation scheme.\n continue\n\n # If current_allocation has the highest expected yield, store it as\n # the optimal allocation.\n # pylint: disable=g-complex-comprehension\n expected_yield = np.sum([\n np.sum([\n self._calculate_tail_probability(\n np.array(range(1, current_allocation[i] + 1)), beliefs[i])\n ]) for i in range(self._n_bins)\n ])\n # pylint: enable=g-complex-comprehension\n\n if expected_yield >= max_expected_yield:\n max_expected_yield = expected_yield\n optimal_allocation = current_allocation\n\n if optimal_allocation is None:\n print(\"No allocation found for this alpha: %f\" % self.params.alpha)\n logging.warning(\"No allocation found for this alpha: %f\",\n self.params.alpha)\n optimal_allocation = np.zeros(\n self._n_bins, dtype=self.action_space.dtype)\n raise gym.error.InvalidAction(\"Invalid action: %s with alpha %f\" %\n (optimal_allocation, self.params.alpha))\n\n self.last_allocation = optimal_allocation\n\n return self.last_allocation", "def bias_prior(self):", "def __init__(self,\r\n input_size: int,\r\n output_size: int,\r\n alpha: float = 0.1,\r\n beta: float = 0.1,\r\n alpha_int: float = 0.1,\r\n beta_int: float = 0.1,\r\n discount_factor: float = 0.997,\r\n discount_factor_int: float = 0.997,\r\n off_policy: bool = True,\r\n off_policy_int: bool = True,\r\n softmax_beta: float = 1.0,\r\n epsilon_noise: float = 0.0,\r\n priority_ext: float = 1.0,\r\n priority_int: float = 1.0,\r\n td_error_threshold: float = 0.01,\r\n priority_inc_factor: float = 1.2,\r\n priority_dec_factor: float = 0.9,\r\n use_reward_modulation: bool = False,\r\n min_reward_decay: float = 0.99,\r\n max_reward_decay: float = 0.99,\r\n sm_max_reward: float = 0.9,\r\n sm_min_reward: float = 0.9,\r\n sm_reward_inc: float = 0.9,\r\n sm_reward_dec: float = 0.99,\r\n intrinsic_decay: float = 0.999,\r\n seed: int = 0):\r\n self._input_size = input_size\r\n self._output_size = output_size\r\n\r\n self.stri_ext = Striatum(input_size, output_size, discount_factor, alpha, beta)\r\n self.stri_int = Striatum(input_size, output_size, discount_factor_int, alpha_int, beta_int)\r\n self.priority_ext_init = priority_ext\r\n self.priority_int_init = priority_int\r\n self.priority_ext = 1\r\n self.priority_int = 0\r\n self.priority_inc_factor = priority_inc_factor\r\n self.priority_dec_factor = priority_dec_factor\r\n self.td_error_threshold = td_error_threshold\r\n self.use_reward_modulation = use_reward_modulation\r\n\r\n self.stn = STN(input_size, input_size)\r\n self.gpi = GPi(output_size, output_size, seed)\r\n self.gpe = GPe(output_size, output_size)\r\n self.tha = Thalamus(output_size, output_size, seed)\r\n\r\n self.off_policy = off_policy\r\n self.off_policy_int = off_policy_int\r\n self.softmax_beta = softmax_beta\r\n self.epsilon_noise = epsilon_noise\r\n\r\n self.responses_values_ext = np.empty(0)\r\n self.responses_values_int = np.empty(0)\r\n\r\n self.mean_reward = 0\r\n self.max_reward = 0\r\n self.min_reward = 0\r\n self.sm_reward_inc = sm_reward_inc\r\n self.sm_reward_dec = sm_reward_dec\r\n self.max_reward_decay = max_reward_decay\r\n self.min_reward_decay = min_reward_decay\r\n self.sm_max_reward = sm_max_reward\r\n self.sm_min_reward = sm_min_reward\r\n self.intrinsic_off = 1\r\n self.intrinsic_decay = intrinsic_decay\r\n\r\n self.reward_modulation_signal = 1", "def _initialize_mapbias(self):\n self.mapbias = sharedX(\n numpy.zeros(self.nmap),\n name='mb',\n borrow=True\n )", "def init_block():\n final_locs = [[1 for x in 
range(LOC_SIZE)] for y in range(LOC_SIZE)]\n for a in range(int(LOC_SIZE / 2)):\n for b in range(a, int(LOC_SIZE / 2)):\n # creating and ringing each of the fleas individually\n print(a, b)\n locs = [[1 if x == a and y == b else 0 for x in range(LOC_SIZE)] for y in range(LOC_SIZE)]\n for i in range(50):\n locs = ring(locs)\n # finding complement of all probabilities to find probabilities of not having a flea there\n for r in range(LOC_SIZE):\n for s in range(LOC_SIZE):\n locs[r][s] = 1 - locs[r][s]\n # transposes and adds the set of probabilities to not have to recalculate for mirrored values\n if a != b:\n locs = operate_on_narray(locs, zip(*locs), lambda o, p: o*p)\n # multiplying the probabilities together\n final_locs = operate_on_narray(final_locs, locs, lambda o, p: o*p)\n return final_locs", "def setup_b_instance(self,norm,add_ps_mask=True):\n inst_tag = self.tag + '_'+str(self.flux_array_ebin)\n b = bsm.bayesian_scan_NPTF(tag=inst_tag,nside=self.nside,work_dir='/tmp/'+self.tag+'/',psf_dir=psf_dir,nlive=700)\n # Input the data, using the external data if provided\n if self.use_external_data:\n b.load_external_data(self.f1.CTB_en_bins,[self.external_data[self.flux_array_ebin]],self.f1.CTB_exposure_maps)\n else:\n b.load_external_data(self.f1.CTB_en_bins,self.f1.CTB_count_maps,self.f1.CTB_exposure_maps)\n\n if add_ps_mask:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False,ps_mask_array = self.f1.ps_mask_array)\n else:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False)\n\n b.add_new_template(self.f1.template_dict)\n b.rebin_external_data(1)\n\n b.add_poiss_model('ps_model','$A_{ps}$',[0.0,3.0],False)\n b.add_poiss_model('p7','$A_{p7}$',[0.0,2.0],False)\n b.add_poiss_model('bubs','$A_{bubs}$',[0.0,2.0],False)\n b.add_poiss_model('iso','$A_{iso}$',[0.0,3.0],False)\n # Add in a fixed J_map template\n b.add_fixed_templates({'J_map':[norm*self.J_map_arr[self.flux_array_ebin]/np.mean(self.J_map_arr[self.flux_array_ebin])]})\n\n b.initiate_poissonian_edep()\n return b", "def test_generator_upward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: 1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_max", "def test_am_threshold(Simulator, plt, seed, rng):\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n d2 = int(d / 2)\n vocab2 = Vocabulary(d2, pointer_gen=rng)\n vocab2.populate('A; B; C; D')\n\n def input_func(t):\n return '0.49 * A' if t < 0.1 else '0.8 * B'\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(\n threshold=0.5, input_vocab=vocab, output_vocab=vocab2,\n function=filtered_step_fn, mapping='by-key')\n m.stimulus = spa.Transcode(input_func, output_vocab=vocab)\n m.stimulus >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.subplot(2, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert np.mean(sim.data[out_p][below_th]) < 0.01\n assert_sp_close(t, sim.data[out_p], vocab2['B'], skip=0.25, duration=0.05)", "def corrupt_example(self, e):\n 
import random\n import copy\n e = copy.copy(e)\n last = e[-1]\n cnt = 0\n while e[-1] == last:\n e[-1] = random.randint(0, self.parameters.vocab_size-1)\n pr = 1./self.parameters.vocab_size\n cnt += 1\n # Backoff to 0gram smoothing if we fail 10 times to get noise.\n if cnt > 10: e[-1] = random.randint(0, self.parameters.vocab_size-1)\n weight = 1./pr\n return e, weight", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0", "def __init__(\n self,\n capacity: int = 10000,\n *,\n batch_size_B: int = 16,\n batch_length_T: int = 64,\n ):\n self.capacity = capacity\n self.batch_size_B = batch_size_B\n self.batch_length_T = batch_length_T\n\n # The actual episode buffer. We are using a deque here for faster insertion\n # (left side) and eviction (right side) of data.\n self.episodes = deque()\n # Maps (unique) episode IDs to the index under which to find this episode\n # within our `self.episodes` deque.\n # Note that even after eviction started, the indices in here will NOT be\n # changed. We will therefore need to offset all indices in\n # `self.episode_id_to_index` by the number of episodes that have already been\n # evicted (self._num_episodes_evicted) in order to get the actual index to use\n # on `self.episodes`.\n self.episode_id_to_index = {}\n # The number of episodes that have already been evicted from the buffer\n # due to reaching capacity.\n self._num_episodes_evicted = 0\n\n # List storing all index tuples: (eps_idx, ts_in_eps_idx), where ...\n # `eps_idx - self._num_episodes_evicted' is the index into self.episodes.\n # `ts_in_eps_idx` is the timestep index within that episode\n # (0 = 1st timestep, etc..).\n # We sample uniformly from the set of these indices in a `sample()`\n # call.\n self._indices = []\n\n # The size of the buffer in timesteps.\n self._num_timesteps = 0\n # The number of timesteps added thus far.\n self._num_timesteps_added = 0\n\n # How many timesteps have been sampled from the buffer in total?\n self.sampled_timesteps = 0\n\n self.rng = np.random.default_rng(seed=None)", "def get_random_neighbor(self):\r\n r_mask = np.random.randint(0, 2, 7)\r\n r_exp = np.random.randint(0, 2, 7)\r\n changes = r_mask*self.attr_factors*np.power(-1.0, r_exp)\r\n Sp = self.S + changes\r\n basal_C = Sp[4]\r\n Sp = np.maximum(Sp, 0.0)\r\n Sp[4] = basal_C\r\n return Sp" ]
[ "0.5811449", "0.5382434", "0.5294318", "0.5251425", "0.5192706", "0.5065947", "0.49759793", "0.49485117", "0.49466747", "0.4929278", "0.491417", "0.4913077", "0.487761", "0.48603368", "0.48593628", "0.48590797", "0.4839911", "0.48218063", "0.47829044", "0.47814372", "0.47763816", "0.47629017", "0.47409523", "0.47212082", "0.47198817", "0.4715452", "0.4715452", "0.4715452", "0.471089", "0.47019744" ]
0.631812
0
If simulation is run for the purpose of comparing affinity maturation with different numbers of hotspots in equally seeded GCs, the following function checks out what low quality binders in the range just above 0.6 can realistically be expected to appear (relevant for low HS case, where larger binding energy intervals may not be populated).
def get_low_binder(RNs, AgEpitope, ntest): E_collect = [] while len(E_collect) < ntest: ab = Ab_seq(RNs) Emax = E_best(ab, AgEpitope) if Emax >= cf.thr: E_collect.append(Emax) return min(E_collect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def test_constructed_is_small(self):\n self.assertTrue(all(elt<10 for elt in goodwinsheaf.checkradii()))#check all entries have small radii", "def _guess_firm_size_upper_too_low(self, bound, tol):\n theta = self.ivp.y[1]\n return abs(theta - bound) / theta <= tol # use relative values!", "def specific_binding_fraction(matrix,n=10000):\n return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])", "def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max", "def check_location_confidence(self):\n\t\t## not the best way of doing things, but since the number of targets is fairly small its not a big deal\n\t\tepsilon_pixels = .05 * self.horizontal_resolution #arbitrary confidence factor\n\t\tepsilon_meters = .08\n\t\tpixel_distances = []\n\t\tactual_distances = []\n\t\tnum_observed = 0\n\t\tfor ti in self.targs:\n\t\t\tif ti.props_are_set:\n\t\t\t\tfor tj in self.targs:\n\t\t\t\t\tif tj.props_are_set: \n\t\t\t\t\t\tpixel_dist = np.linalg.norm(tj.position_camera - ti.position_camera)\n\t\t\t\t\t\tactual_dist = np.abs(tj.d_cam_image - ti.d_cam_image)\n\t\t\t\t\t\tif pixel_dist == 0:\n\t\t\t\t\t\t\tpixel_dist = 10000 #ignore two of the same points\n\t\t\t\t\t\t\tactual_dist = 10000\n\t\t\t\t\t\tpixel_distances.append(pixel_dist)\t\n\t\t\t\t\t\tactual_distances.append(actual_dist)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\t\tactual_distances.append(10000)\n\t\t\telse:\n\t\t\t\tfor _ in self.targs:\n\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\tactual_distances.append(10000)\n\t\tmin_ind_pixel = np.argmin(pixel_distances)\n\t\tmin_ind_actual = np.argmin(actual_distances)\n\t\t#min_ind is encoded in base (num_targets); decode it to find the closest two points\n\t\tbest_guys = [self.targs[min_ind_pixel/len(self.targs)],self.targs[min_ind_pixel%len(self.targs)]]\n\t\tif pixel_distances[min_ind_pixel] > epsilon_pixels or actual_distances[min_ind_actual] > epsilon_meters:\n\t\t\t#measurements are not trustworthy, return nothing\n\t\t\treturn None\n\n\t\treturn best_guys", "def sbound(self, u, s):\n sele = u.select_atoms(s)\n calc = u.select_atoms('name CAL')\n \n dist = MDAnalysis.analysis.distances.distance_array(calc.coordinates(), sele.coordinates())\n for i, row in enumerate(dist):\n \n if any([d<2.5 for d in row]):\n\treturn (True, i)\n return (False, -1)", "def quality_data(self, s):\n known_symbols = np.mod(range(176),48)>=32\n print('quality_data',np.sum(np.real(s[known_symbols])<0))\n success = np.sum(np.real(s[known_symbols])<0) < 20\n return success,0 ## no doppler estimate for data frames", "def check_binning_parameter_range(x_min, x_max, ws_unit):\n if ws_unit == 'dSpacing' and not 0 < x_min < x_max < 20:\n # dspacing within (0, 20)\n x_range_is_wrong = True\n elif ws_unit == 'TOF' and not 1000 < x_min < x_max < 1000000:\n # TOF within (1000, 1000000)\n x_range_is_wrong = True\n elif ws_unit != 'dSpacing' 
and ws_unit != 'TOF':\n raise NotImplementedError('Impossible case for unit {}'.format(ws_unit))\n else:\n # good cases\n x_range_is_wrong = False\n\n if x_range_is_wrong:\n ero_msg = 'For {0}, X range ({1}, {2}) does not make sense' \\\n ''.format(ws_unit, x_min, x_max)\n print('[ERROR CAUSING CRASH] {}'.format(ero_msg))\n raise RuntimeError(ero_msg)\n\n return", "def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound", "def ram_condition(min_gb=3):\n return get_free_gb() < min_gb", "def test_threshold_range_b(self):\n code, out, err = self.t.runError(\"--threshold --min 3.2 --max 3.1\")\n self.assertIn(\"The max value must be higher than the min value.\", out)", "def test_220_boosted_goal_difference_for_home_models_with_various_upper_home_win_threshold(self):\n\n def create_model_fn(fn_team: str):\n team_stat = Stats.n_sample_stats_for_team(cursor=db_in_cursor,\n team=fn_team,\n last_sample_date=self.model_date,\n n_samples=self.num_samples,\n normalize_by_matches=True)\n\n\n return FeatureModel(\n input_data=[self.home_boost + team_stat.goal_diff, team_stat.goal_diff],\n id=team_stat.team_name\n )\n\n default_threshold_lower = 0.3\n default_threshold_upper = 0.9\n\n explore_range = (default_threshold_lower, 5.0)\n num_steps_wanted = 60\n step_size = (explore_range[1] - explore_range[0])/num_steps_wanted\n\n threshold_lower = default_threshold_lower\n for threshold_upper in StatsPredictionPremierLeague.crange(first=explore_range[0], test=lambda x: x <= explore_range[1],\n update=lambda x: x + step_size):\n for match_date in played_home_OR_away_before_dates:\n ####\n #  Build model up to the day before the match\n ####\n self.home_boost = 0.72\n self.model_date = match_date - timedelta(days=1)\n self.num_samples = num_matches_in_season\n\n models: {str: FeatureModel} = FeatureModel.create_models_for_all_teams(\n model_making_fn=create_model_fn, entities=teams)\n\n self.persist_models(model_gen_date=self.model_date, model_description=self.shortDescription(), models=models)\n\n # variant_string = 'threshold_lower=%f, threshold_upper=%f' % (threshold_lower, threshold_upper)\n self.make_and_store_predictions_for_date(match_date=match_date, models=models, draw_range=(threshold_lower, threshold_upper),\n variants=threshold_upper)", "def _is_this_healthy_rDNA(self):\n if self.length < 3000:\n return 0\n mapping_state = []\n for item in self.sam_summary:\n if item[1] != '0':\n mapping_state.append(1)\n else:\n mapping_state.append(0)\n threshold = 0.8\n if sum(mapping_state)/len(mapping_state) > threshold:\n return 1\n else:\n for i in range(1, len(mapping_state) - 50):\n if sum(mapping_state[i:])/len(mapping_state[i:]) > threshold or \\\n sum(mapping_state[:-i])/len(mapping_state[:-i]) > threshold:\n healthy = 2\n return 0", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool 
and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass", "def in_range(center_bot, nanobots):\n return [b for b in nanobots if center_bot.distance_to(b) <= center_bot.strength]", "def metropolis_hastings_accept(energy_prev, energy_next, s_rng):\r\n ediff = energy_prev - energy_next\r\n return (TT.exp(ediff) - s_rng.uniform(size=energy_prev.shape)) >= 0", "def _check_bound(self, q):\n mat = ur_utils.forward(q, self._ik_params)\n xyz = mat[:3, 3]\n inside_bound = np.all(self._end_effector_low <= xyz) and np.all(xyz <= self._end_effector_high)\n inside_buffer_bound = (np.all(self._end_effector_low + self._box_bound_buffer <= xyz) and \\\n np.all(xyz <= self._end_effector_high - self._box_bound_buffer))\n return inside_bound, inside_buffer_bound, mat, xyz", "def test_210_boosted_goal_difference_for_home_models_with_various_lower_away_win_threshold(self):\n\n def create_model_fn(fn_team: str):\n team_stat = Stats.n_sample_stats_for_team(cursor=db_in_cursor,\n team=fn_team,\n last_sample_date=self.model_date,\n n_samples=self.num_samples,\n normalize_by_matches=True)\n\n\n return FeatureModel(\n input_data=[self.home_boost + team_stat.goal_diff, team_stat.goal_diff],\n id=team_stat.team_name\n )\n\n default_threshold_lower = 0.3\n default_threshold_upper = 0.9\n\n explore_range = (-2.0, default_threshold_upper)\n num_steps_wanted = 60\n step_size = (explore_range[1] - explore_range[0])/num_steps_wanted\n\n threshold_upper = default_threshold_upper\n for threshold_lower in StatsPredictionPremierLeague.crange(first=explore_range[0], test=lambda x: x <= explore_range[1],\n update=lambda x: x + step_size):\n for match_date in played_home_OR_away_before_dates:\n ####\n #  Build model up to the day before the match\n ####\n self.home_boost = 0.72\n self.model_date = match_date - timedelta(days=1)\n self.num_samples = num_matches_in_season\n\n models: {str: FeatureModel} = FeatureModel.create_models_for_all_teams(\n model_making_fn=create_model_fn, entities=teams)\n\n self.persist_models(model_gen_date=self.model_date, model_description=self.shortDescription(), models=models)\n\n # variant_string = 'threshold_lower=%f, threshold_upper=%f' % (threshold_lower, threshold_upper)\n self.make_and_store_predictions_for_date(match_date=match_date, models=models, draw_range=(threshold_lower, threshold_upper),\n variants=threshold_lower)", "def test_boundary_boxes(gt_detection_combo):\n found = False\n overlap_threshold = 0.7\n\n for found_box in gt_detection_combo.detected_boxes:\n if overlap_between(gt_detection_combo.gt_box, found_box) > overlap_threshold:\n found = True\n break\n\n assert found is True", "def BHS_standard(err):\n \n leq5 = 0\n leq10 = 0\n leq15 = 0\n \n for i in range(len(err)):\n \n if(abs(err[i])<=5):\n leq5 += 1\n leq10 += 1\n leq15 += 1\n \n elif(abs(err[i])<=10): \n leq10 += 1\n leq15 += 1\n \n elif(abs(err[i])<=15): \n leq15 += 1\n \n \n \n return (leq5*100.0/len(err), leq10*100.0/len(err), leq15*100.0/len(err))", "def checkCorrectLumisEventGEN(dataset):\n numlumis = dbs3Client.getLumiCountDataSet(dataset)\n numEvents = dbs3Client.getEventCountDataSet(dataset)\n # numEvents / numLumis >= 300\n if numlumis >= numEvents / 300.0:\n return True\n else:\n return False", "def frame_in_range(frame_):\n f = cv2.cvtColor(frame_, cv2.COLOR_BGR2GRAY)\n if f.shape != 
grey.shape:\n f = cv2.resize(f, (grey.shape[1], grey.shape[0]))\n score = structural_similarity(f, grey)\n self.logger.debug(f\"frame score: {score}, {score>kwargs['threshold']}\")\n return score > kwargs['threshold']", "def compute_sw_threshold(flanking_reads, paf_dict, fasta_dict, window_size):\n\n max_scores = []\n for query, target in itertools.product(flanking_reads, flanking_reads):\n\n if str(query + target) in paf_dict:\n overlap_info = paf_dict[query+target]\n elif str(target + query) in paf_dict:\n # get info and swap them\n overlap_info = paf_dict[target+query]\n query, target = target, query\n else:\n continue\n\n query_start = overlap_info['query_start']\n query_end = overlap_info['query_end']\n target_start = overlap_info['target_start']\n target_end = overlap_info['target_end']\n\n query_seq = fasta_dict[query][query_start:query_end]\n target_seq = fasta_dict[target][target_start:target_end]\n\n # Get scores for this pair; store in cur_scores\n cur_scores = []\n if window_size:\n # Use rolling window\n min_len = min(len(query_seq), len(target_seq))\n for start, end in utils.pairwise(range(0, min_len, window_size)):\n qs = query_seq[start:end]\n ts = target_seq[start:end]\n score = smith_waterman.smith_waterman(qs, ts)\n cur_scores.append(score)\n\n if cur_scores:\n score = max(cur_scores)\n max_scores.append(score)\n else:\n # No rolling window\n score = smith_waterman.smith_waterman(query_seq, target_seq)\n max_scores.append(score)\n\n threshold = 0.9 * max(max_scores)\n\n print(\"using {} as threshold\".format(threshold))\n\n plt.subplot(2, 3, 2)\n plt.hist(max_scores)\n plt.title(\"FLANKING READS\\nhistogram of num_gaps / len(aligned_sequence)\\nthreshold = {}\\nwindow_size = {}\\nshowing {} scores\"\n .format(threshold, window_size, len(max_scores)))\n\n\n\n return threshold", "def sanitize_energies(full_us, lamb_idx, cutoff=10000):\n ref_us = np.expand_dims(full_us[:, lamb_idx], axis=1)\n abs_us = np.abs(full_us - ref_us)\n return np.where(abs_us < cutoff, full_us, np.inf)", "def check_gaus_fit(hist):\n s = ROOT.TSpectrum(1)\n s.Search(hist, 1, \"new\")\n peaks_buff = s.GetPositionX()\n x_peak = peaks_buff[0]\n\n return (abs(hist.GetFunction('gaus').GetParameter(1) - x_peak) / abs(x_peak)) < 0.1", "def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False", "def _detect_available_bands(self):\n return (\n [col.rpartition('_')[0] for col in self._columns if col.endswith('_FLUXMAG0')] or\n [col.partition('_')[2] for col in self._columns if col.startswith('psFlux_')]\n )", "def quantum_value_lower_bound(\n self,\n dim: int = 2,\n iters: int = 5,\n tol: float = 10e-6,\n ):\n # Get number of inputs and outputs.\n _, num_outputs_bob, _, num_inputs_bob = self.pred_mat.shape\n\n best_lower_bound = float(\"-inf\")\n for _ in range(iters):\n # Generate a set of random POVMs for Bob. 
These measurements serve\n # as a rough starting point for the alternating projection\n # algorithm.\n bob_tmp = random_povm(dim, num_inputs_bob, num_outputs_bob)\n bob_povms = defaultdict(int)\n for y_ques in range(num_inputs_bob):\n for b_ans in range(num_outputs_bob):\n bob_povms[y_ques, b_ans] = bob_tmp[:, :, y_ques, b_ans]\n\n # Run the alternating projection algorithm between the two SDPs.\n it_diff = 1\n prev_win = -1\n best = float(\"-inf\")\n while it_diff > tol:\n # Optimize over Alice's measurement operators while fixing\n # Bob's. If this is the first iteration, then the previously\n # randomly generated operators in the outer loop are Bob's.\n # Otherwise, Bob's operators come from running the next SDP.\n alice_povms, lower_bound = self.__optimize_alice(dim, bob_povms)\n bob_povms, lower_bound = self.__optimize_bob(dim, alice_povms)\n\n it_diff = lower_bound - prev_win\n prev_win = lower_bound\n\n # As the SDPs keep alternating, check if the winning probability\n # becomes any higher. If so, replace with new best.\n best = max(best, lower_bound)\n\n best_lower_bound = max(best, best_lower_bound)\n\n return best_lower_bound", "def vsize(min, max):\n return lambda mate: any(min <= v <= max for v in mate['read_info'].v_list)" ]
[ "0.5819317", "0.57218736", "0.5664285", "0.5512384", "0.5491544", "0.5476615", "0.54661477", "0.54643595", "0.544348", "0.54200816", "0.54185605", "0.5366565", "0.53566635", "0.5350197", "0.53409666", "0.53298765", "0.532814", "0.53204817", "0.5299489", "0.52875227", "0.528628", "0.5271289", "0.5239153", "0.5223244", "0.5222556", "0.52219933", "0.5205944", "0.5205605", "0.5196527", "0.5194513" ]
0.58382094
0
Mutates a given Ab sequence according to the rules for mutations in the FWR and CDR parts. If no deadly mutation occurs in the FWR part, there is a possibility of change in the CDR part.
def mutate_seq(seq, block0, RNs):
    sequence = seq
    block = block0
    # get the number of changes in the FWR part and key part
    # for framework part, include the rate of silent mutations (75%), this
    # is not necessary for the explicitly modeled residues as changes there
    # can lead to replacement with the same AA still
    FWR_changes = np.random.binomial(cf.lAb, cf.p_err_FWR*0.75)
    CDR_changes = np.random.binomial(cf.nkey, cf.p_err_CDR)
    if FWR_changes > 0:
        # determine number of deadly muts and blockmuts in the non-death
        # branch (p_death + (1-p_death)*p_block + (1-p_death)*(1-p_block)=1)
        # 0 signifies deadly mutation, 1 signifies blocking mutation
        mutIDs = list(np.random.choice([0, 1, 2],
                                       p=[cf.p_death_FWR,
                                          (1-cf.p_death_FWR) * cf.p_block_FWR,
                                          (1-cf.p_death_FWR) * (1-cf.p_block_FWR)],
                                       size=FWR_changes))
        if 0 in mutIDs:
            # if deadly mutations happen, return no sequence
            return None, 0, 0
        elif 1 in mutIDs:
            # if block mutation happens, set block to true
            block = True
    # if the cell has not died yet, analyse mutations in the CDR region
    if CDR_changes > 0:
        # get non-repetitive positions where mutation will be attempted
        changepos = random.sample(range(cf.nkey), CDR_changes)
        for pos in changepos:
            # get transition probabilities for the current amino acid
            cumprob = np.cumsum(cf.tp20[sequence[pos] - 1])
            randi = RNs.getR()
            # find replacement codon
            for i in range(21):  # 20 aa plus stop
                if randi < cumprob[i]:
                    sequence[pos] = i + 1
                    break
        # if stop codon was integrated into the sequence, return 0 as well
        if 21 in sequence:
            return None, 0, 0
    # only mutations of cells that survived are returned for the counting
    return sequence, FWR_changes, block
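As a rough illustration of the two-compartment fate logic above, here is a self-contained sketch: binomial draws decide how many mutations hit the FWR and CDR compartments, and each FWR hit is then assigned a lethal, blocking or neutral fate. The sequence length, error rates and fate probabilities are invented for demonstration (the real values live in the cf configuration module), so this is not the simulation's parameterisation.

import numpy as np

rng = np.random.default_rng(2)

def toy_mutation_fate(l_fwr=300, n_cdr=10, p_err_fwr=1e-3, p_err_cdr=3e-3,
                      p_death=0.5, p_block=0.1):
    # number of consequential mutations per compartment; the 0.75 factor
    # mirrors the silent-mutation correction used in the function above
    fwr_changes = rng.binomial(l_fwr, p_err_fwr * 0.75)
    cdr_changes = rng.binomial(n_cdr, p_err_cdr)
    if fwr_changes > 0:
        # 0 = lethal, 1 = blocking, 2 = neutral; probabilities sum to 1
        fates = rng.choice([0, 1, 2], size=fwr_changes,
                           p=[p_death, (1 - p_death) * p_block,
                              (1 - p_death) * (1 - p_block)])
        if 0 in fates:
            return "dead", fwr_changes, cdr_changes
        if 1 in fates:
            return "blocked", fwr_changes, cdr_changes
    return "viable", fwr_changes, cdr_changes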
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SimultaneousRB_AC(qubits: qreg, seqs, add_cals=True):\n # Original:\n # seqsBis = []\n # for seq in zip(*seqs):\n # seqsBis.append([reduce(operator.__mul__, [AC(q,c) for q,c in zip(qubits,\n # pulseNums)]) for pulseNums in zip(*seq)])\n\n # # Add the measurement to all sequences\n # for seq in seqsBis:\n # seq.append(reduce(operator.mul, [MEAS(q) for q in qubits]))\n\n# axis_descriptor = [{\n# 'name': 'length',\n# 'unit': None,\n# 'points': list(map(len, seqs)),\n# 'partition': 1\n# }]\n\n # # Tack on the calibration sequences\n # if add_cals:\n # seqsBis += create_cal_seqs((qubits), 2)\n # axis_descriptor.append(cal_descriptor((qubits), 2))\n\n # metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs})\n\n for seq in zip(*seqs):\n # Start sequence\n init(qubits)\n for pulseNums in zip(*seq):\n Barrier(qubits)\n for q, c in zip(qubits, pulseNums):\n AC(q, c)\n # Measure at end of each sequence\n measConcurrently(qubits)\n\n if add_cals:\n # Tack on calibration\n create_cal_seqs(qubits, 2)", "def SingleQubitRB_AC(qubit: qreg, seqs, purity=False, add_cals=True):\n\n # Original:\n # seqsBis = []\n # op = [Id(qubit, length=0), Y90m(qubit), X90(qubit)]\n # for ct in range(3 if purity else 1):\n # for seq in seqs:\n # seqsBis.append([AC(qubit, c) for c in seq])\n # #append tomography pulse to measure purity\n # seqsBis[-1].append(op[ct])\n # #append measurement\n # seqsBis[-1].append(MEAS(qubit))\n\n # # Tack on the calibration sequences\n # if add_cals:\n # seqsBis += create_cal_seqs((qubit,), 2)\n\n# axis_descriptor = [{\n# 'name': 'length',\n# 'unit': None,\n# 'points': list(map(len, seqs)),\n# 'partition': 1\n# }]\n# metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs})\n\n # AC() gives a single pulse on qubit\n\n op = [Id, Y90m, X90]\n for ct in range(3 if purity else 1):\n for seq in seqs:\n init(qubit)\n for c in seq:\n AC(qubit, c)\n # append tomography pulse to measure purity\n # See issue #: 53\n func = op[ct]\n if ct == 0:\n func(qubit, length=0)\n else:\n func(qubit)\n # append measurement\n MEAS(qubit)\n\n if add_cals:\n # Tack on calibration sequences\n create_cal_seqs(qubit, 2)", "def fix_seq(self, fixed_seq):\n self.wc.fix_seq(wc(fixed_seq))", "def test_assign_seqs_fasta_plus_qual(self):\r\n\r\n # Handles single fasta and single qual\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = \">s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA 
bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n\"\r\n expected_demultiplexed_qual_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 1, 'AACTCGTCGATG,s1': 1,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'AACTCGTCGATG': 1, 'AGCAGCACTTGT': 1,\r\n 'ACCGCAGAGTCA': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def mutate_seq(genome):\n for var in genome.get_variants():\n if var.type == \"snp\":\n mutate_snp(genome, var)\n elif var.type == \"indel\":\n mutate_indel(genome, var)\n elif var.type == \"deletion\":\n mutate_deletion(genome, var)\n elif var.type == \"translocation origin\":\n mutate_trans_orig(genome, var)\n elif var.type == \"translocation insert\":\n mutate_trans_ins(genome, var)", "def mutate_random(DNA,AminoAcid,distance,pdic,rev,header,Random,outputpath):\r\n ##debug vals \r\n start = [] # list of start positions of mutations ( start means first mutation in balanced case)\r\n both = [] # start and end position\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(\"BalancedMutation\"+\"\\t\"+\"NewAA\" + \"\\t\" + \"OldAA\"+\"\\t\"+\"NewAAPos\"+\"\\t\"+\"OldAAPos\" +\"\\t\"+ \"NewDNA\"+\"\\t\"+ \"OldDNA\"+ \"\\t\"+\"NewDNAPos\"+\"\\t\"+\"OldDNAPos\"+\"\\n\")\r\n fobj2.close()\r\n \r\n \r\n # generate start positions for mutation (the samplespace)\r\n samplespace = []\r\n for i in range (2,len(AminoAcid),distance/3):\r\n samplespace.append(i)\r\n \r\n \r\n ##random_modification\r\n if (Random ==1):\r\n r.shuffle(samplespace)\r\n else:\r\n pass\r\n \r\n dna_list = list(DNA)\r\n AminoAcid_list = list(AminoAcid)\r\n \r\n '''the lookup dictionary for the aa triplets '''\r\n lookup_dic = INI.createdic(AminoAcid)\r\n\r\n #gotit indicator if a possibility was found to revert the initial changes (start of mutation)\r\n gotit=False\r\n # stat variables\r\n succ_counter = 0\r\n fail_counter = 0 \r\n skip = 0\r\n \r\n ''' Main loop over the AminoAcid'''\r\n for i in samplespace:\r\n ''' no triplet left --> break '''\r\n if(i+2 >len(AminoAcid)):\r\n print(\"\\t(finished...exceeded length of AA)\")\r\n continue\r\n \r\n ''' AA which is going to be mutated'''\r\n AA = AminoAcid_list[i]\r\n \r\n '''index for dna : i*3 --> AminoAcid --> DNA\r\n #not i*3+3 because i starts at AA 2 since we need a right and left neighbor'''\r\n iprime = i*3\r\n \r\n '''AA and corresponding DNA triplet for the middle AA '''\r\n AA_triplet= AminoAcid_list[i-1]+AminoAcid_list[i]+AminoAcid_list[i+1]\r\n DNA_triplet = DNA[iprime:iprime+3]\r\n\r\n # get temporary list of all mutations. 
Iterate over it to find best possible substitution\r\n mutationsliste,aaliste = getMutation(AA,DNA_triplet)\r\n \r\n \r\n # isvalidposition returns 1 if the position isforbidden, else 0\r\n val = isvalidposition(pdic, iprime, distance)\r\n if (val ==1):\r\n skip+=1\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(str(0)+\"\\t\"+new_AA_triplet + \"\\t\" + \"' '\"+\"\\t\"+str(i)+\"\\t\"+\"' '\" +\"\\t\"+ new_triplet+\"\\t\"+ \"' '\"+ \"\\t\"+str(iprime+position)+\"\\t\"+\"'(skipped)'\"+\"\\n\")\r\n fobj2.close()\r\n continue\r\n \r\n else:\r\n pass\r\n \r\n\r\n for q,item in enumerate(mutationsliste):\r\n \r\n if gotit==True:\r\n break\r\n else:\r\n pass\r\n \r\n ''' old and new variables for before/after the mutation '''\r\n new_triplet = mutationsliste[q]\r\n new_AA = aaliste[q]\r\n new_N,old_N,position = getdifference(DNA_triplet,new_triplet)\r\n new_AA_triplet = AA_triplet[0]+new_AA+AA_triplet[2]\r\n tempdic = pdic\r\n tempdic[iprime+position]=\"M\"\r\n \r\n if (new_AA_triplet in lookup_dic):\r\n '''templist--> contains all starting positions of the \"new_AA_triplet\" which we want to substitute back '''\r\n templist = lookup_dic[new_AA_triplet]\r\n \r\n \r\n # add potential mutation to dictionary\r\n tempposition = [iprime+position,\"M\"]\r\n for l in range(0,len(templist)):\r\n posi = templist[l]\r\n # i*3 --> protein nach DNA, +3 betrachten IMMER mittlere AA\r\n ''' suitable dna position found? '''\r\n if (new_triplet == dna_list[posi*3+3]+dna_list[posi*3+3+1]+dna_list[posi*3+3+2]):\r\n val = isvalidposition(tempdic, posi*3+3+position, distance)\r\n \r\n if (val ==1):\r\n skip+=1\r\n continue\r\n else:\r\n pass\r\n \r\n '''back substitution & do subs on 1st position'''\r\n pdic[posi*3+3+position]=\"R\"\r\n dna_list[posi*3+3+position]= old_N\r\n \r\n pdic[iprime+position]=\"M\"\r\n dna_list[iprime+position]= new_N\r\n \r\n AminoAcid_list[i]= new_AA\r\n AminoAcid_list[posi+1]= AA\r\n \r\n gotit = True\r\n succ_counter+=1\r\n #lookup_dic[new_AA_triplet] = [i for i in lookup_dic[new_AA_triplet] if i!=posi]\r\n lookup_dic[new_AA_triplet].remove(posi)\r\n \r\n '''writing the log file '''\r\n fobj= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj.write(str(1)+\"\\t\"+AA_triplet + \"\\t\" + new_AA_triplet+\"\\t\"+str(i)+\"\\t\"+str(posi) +\"\\t\"+ DNA_triplet+\"\\t\"+ str(new_triplet)+ \"\\t\"+str(iprime+position)+\"\\t\"+str(posi*3+3+position)+\"\\n\")\r\n fobj.close()\r\n \r\n ## statistics\r\n start.append(iprime+position)\r\n both.extend([iprime+position,posi*3+3+position])\r\n break\r\n \r\n # no possible triplet positions for back substitution in lookup_dic \r\n else:\r\n continue\r\n \r\n # after loop \r\n if (gotit==False):\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(str(0)+\"\\t\"+new_AA_triplet + \"\\t\" + \"' '\"+\"\\t\"+str(i)+\"\\t\"+\"' '\" +\"\\t\"+ new_triplet+\"\\t\"+ \"' '\"+ \"\\t\"+str(iprime+position)+\"\\t\"+\"'(tried)'\"+\"\\n\")\r\n fobj2.close()\r\n fail_counter+=1\r\n # reverse substitutions on? (=1) off (=0). If one dont change first mutation in the first place. Else: just change it.. 
\r\n if (rev==0):\r\n pdic[iprime+position]=\"M\"\r\n dna_list[iprime+position]= new_N\r\n AminoAcid_list[i]= new_AA\r\n start.append(iprime+position)\r\n both.extend([iprime+position]) \r\n elif (gotit==True):\r\n gotit = False\r\n \r\n # stats (INI.savepickle(pdic,header+\"_pdic_e\"))\r\n print(\"\\r\\n########Some stats:########\")\r\n print(\"DNA length:\\t\" + str(len(DNA)))\r\n print(\"max substitutions:\\t\" + str(len(DNA)/distance))\r\n print(\"#Balanced Mutations:\\t\" + str(succ_counter))\r\n \r\n \r\n return (\"\".join(dna_list))", "def test_assign_seqs_error_correction(self):\r\n\r\n # Handles single fasta and single qual with error correction\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_with_bc_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=TACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=1\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=GCCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=1\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n expected_demultiplexed_qual_seq = '>s1_1 ABCD0001 orig_bc=TACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=1\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_2 EFGH0002 orig_bc=GCCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=1\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 1, 'AACTCGTCGATG,s1': 1,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'TACTCGTCGATG': 1, 'GCCGCAGAGTCA': 1,\r\n 'AGCAGCACTTGT': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [2, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def test_assign_seqs_fasta_qual_added_demultiplex_field(self):\r\n\r\n # Handles single fasta and single qual\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', '1'): 's1',\r\n ('AGCAGCACTTGT', '2'): 's2', ('ACCGCAGAGTCA', '3'): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 
'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = 'Added_Demultiplex'\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n'\r\n expected_demultiplexed_qual_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n'\r\n\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,3,s3': 0, 'AGCAGCACTTGT,2,s2': 0,\r\n 'AACTCGTCGATG,1,s1': 1}\r\n expected_bc_freqs = {'AACTCGTCGATG': 1, 'AGCAGCACTTGT': 1,\r\n 'ACCGCAGAGTCA': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def apply_rule(seq):\n for idx,prop in enumerate(seq.ant):\n\n if prop.conn == \"not\":\n # create a copy of seq (we don't want to mutate it)\n new_seq = Sequent(seq.ant[:],seq.con[:])\n # pop the proposition from the list\n not_a = new_seq.ant.pop(idx)\n # make sure we popped the correct one\n assert not_a.conn == \"not\"\n # apply the rule\n new_seq.con = [ not_a.p1 ] + new_seq.con\n # return a list of 3 values with seq2 being None\n # (since there is not split in this rule)\n return [new_seq , None, \"not left\"]\n\n elif prop.conn == \"or\":\n # create two copies of seq\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_or_c = new_seq1.ant.pop(idx)\n # make sure we popped the correct one\n assert b_or_c.conn == \"or\"\n assert b_or_c == new_seq2.ant.pop(idx)\n # apply the rule\n new_seq1.ant.append(b_or_c.p1)\n new_seq2.ant.append(b_or_c.p2)\n # return the obtained sequents and the rule name\n # here we have two sequents since \"or left\"\n # has two sequents at the top\n return [new_seq1 , new_seq2, \"or left\"]\n\n elif prop.conn == \"and\":\n #create one copy of seq\n new_seq = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_and_c = new_seq.ant.pop(idx)\n # make sure we popped the correct one\n assert b_and_c.conn == \"and\"\n # apply the rule\n new_seq.ant.append(b_and_c.p1)\n new_seq.ant.append(b_and_c.p2)\n # return a list of 3 values with seq2 being None\n return [new_seq, None, 'and left']\n\n \n elif prop.conn == \"imp\":\n # create two copies of seq\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_imp_c = new_seq1.ant.pop(idx)\n # make sure we popped the correct one\n assert b_imp_c.conn == \"imp\"\n assert b_imp_c == new_seq2.ant.pop(idx)\n # apply the rule\n new_seq1.ant.append(b_imp_c.p2)\n new_seq2.con.append(b_imp_c.p1)\n # return the obtained sequents and the rule name\n return [new_seq1 , new_seq2, \"implies 
left\"]\n\n for idx,prop in enumerate(seq.con):\n if prop.conn == \"not\":\n new_seq = Sequent(seq.ant[:],seq.con[:])\n # pop the proposition from the list\n not_a = new_seq.con.pop(idx)\n # make sure we popped the correct one\n assert not_a.conn == \"not\"\n # apply the rule\n new_seq.ant = [ not_a.p1 ] + new_seq.ant\n # return a list of 3 values with seq2 being None\n return [new_seq , None, \"not right\"]\n elif prop.conn == \"or\":\n # create one copy of seq\n new_seq = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_or_c = new_seq.con.pop(idx)\n # make sure we popped the correct one\n assert b_or_c.conn == \"or\" \n # apply the rule\n new_seq.con.append(b_or_c.p1)\n new_seq.con.append(b_or_c.p2)\n # return the obtained sequent and the rule name\n return [new_seq , None, \"or right\"]\n\n elif prop.conn == 'and':\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n b_and_c = new_seq1.con.pop(idx)\n assert b_and_c.conn == \"and\"\n assert b_and_c == new_seq2.con.pop(idx)\n new_seq1.con.append(b_and_c.p1)\n new_seq2.con.append(b_and_c.p2)\n return [new_seq1 , new_seq2, \"and right\"]\n\n elif prop.conn == 'imp':\n new_seq = Sequent(seq.ant[:], seq.con[:])\n b_imp_c = new_seq.con.pop(idx)\n assert b_imp_c.conn == \"imp\"\n new_seq.ant.append(b_imp_c.p1)\n new_seq.con.append(b_imp_c.p2)\n return [new_seq , None, \"implies right\"]", "def determine_aa_change( self ):\n for k,v in self.obj_mi.hash_isoforms.iteritems(): #k = string that is isoform_id, v = Isoform instance\n obj_tt = self.create_transcript_instances( k )\n\n #METHOD 1: get the original codon & mutated codon\n # orig_codon = obj_tt.retrieve_containing_codon( self.snv_start, self.snv_strand )\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # mut_codon = obj_tt.retrieve_containing_codon( self.snv_start, self.snv_strand )\n\n\n #METHOD 2: get the mutated codon\n full_pos = self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )\n hash_codon_info = obj_tt.get_mutated_codon( self.base_orig, self.base_alt, full_pos, self.snv_strand, True ) #output is hash -> {'codon_orig': codon_orig, 'codon_mut': codon_mut, 'aa_orig': aa_orig, 'aa_mut': aa_mut}\n\n\n\n ##TEST:: show the AA change based on mutation\n # print \"hash_codon_info: \"\n # print hash_codon_info\n\n # print \"gene strand & snv strand: \", obj_tt.iso_sj.strand, \" & \", self.snv_strand\n # print \"original base > mutated base: \", self.base_orig, \" > \", self.base_alt\n # print \"original codon > mutated codon: \", hash_codon_info['codon_orig'], \" > \", hash_codon_info['codon_mut']\n # print \"original AA > mutated AA: \", hash_codon_info['aa_orig'], \" > \", hash_codon_info['aa_mut']\n\n\n ##TEST:: determine consequence\n print \"GV_DAAC 1: \"\n obj_tt.alteration_consequence( self.base_alt, self.get_genomic_range(), self.snv_strand, self.alt_type )\n \n\n ##TEST METHOD - SEE WHAT STEPS I NEED TO PERFORM\n #TEST:: retrieve the original base & the mutated base\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # orig_base = obj_tt.arr_nuc_seq[ i_genome_pos ]\n # print \"k = \", k, \" & i_genome_pos = \", i_genome_pos, \" | orig_base = \", orig_base, \" & double_check = \", self.base_orig, \" & iso_sj.strand = \", obj_tt.iso_sj.strand, \" & mut strand = \", self.snv_strand\n # hash_orig_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_orig = \", hash_orig_codon\n # get_orig_codon = 
obj_tt.arr_nuc_seq[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] + 1 ]\n # str_orig_codon = ''.join( get_orig_codon ) if obj_tt.iso_sj.strand > 0 else ''.join( get_orig_codon[::-1] )\n # print \"seq_orig = \", str_orig_codon, \" & type = \", type( get_orig_codon ), \" & rf = \", obj_tt.arr_rf[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] + 1 ], \" & list_orig_codon = \", get_orig_codon\n\n # ##TEST:: make mutation\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # hash_mut_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_muts = \", hash_mut_codon\n # get_mut_codon = obj_tt.arr_nuc_seq[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] + 1 ]\n # str_mut_codon = ''.join( get_mut_codon ) if obj_tt.iso_sj.strand > 0 else ''.join( get_mut_codon[::-1] )\n # print \"seq_muts = \", str_mut_codon, \" & type = \", type( get_mut_codon ), \" & rf = \", obj_tt.arr_rf[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] + 1 ], \" & list_mut_codon = \", get_mut_codon \n\n # ##TEST:: retrieve \n # print \"AA: from \", Seq( str_orig_codon ).translate( to_stop = False ), \">\", Seq( str_mut_codon ).translate( to_stop = False )\n\n # try:\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # orig_base = obj_tt.arr_nuc_seq[ i_genome_pos ]\n # print \"k = \", k, \" & i_genome_pos = \", i_genome_pos, \" | orig_base = \", orig_base, \" & double_check = \", self.base_orig, \" & iso_sj.strand = \", obj_tt.iso_sj.strand, \" & mut strand = \", self.snv_strand\n # hash_orig_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_orig = \", hash_orig_codon\n # get_orig_codon = obj_tt.arr_nuc_seq[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] ]\n # print \"seq_orig = \", get_orig_codon\n\n # ##TEST:: make mutation\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # hash_mut_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_muts = \", hash_mut_codon\n # get_mut_codon = obj_tt.arr_nuc_seq[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] ]\n # print \"seq_muts = \", get_mut_codon \n\n # ##TEST:: retrieve \n # print \"AA: from \", Seq( orig_codon ).translate( to_stop = False ), \">\", Seq( mut_codon ).translate( to_stop = False )\n # except:\n # print \"ERROR:: for \", k, \", position does not exist: \", self.snv_start\n # continue\n\n print \"////////////////////\\n\"", "def test_assign_seqs_exceeds_error_correction(self):\r\n\r\n # Handles single fasta and single qual, errors exceed max\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_with_bc_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 0.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n 
expected_demultiplexed_fasta_seq = '>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n expected_demultiplexed_qual_seq = '>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 0, 'AACTCGTCGATG,s1': 0,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'TACTCGTCGATG': 1, 'GCCGCAGAGTCA': 1,\r\n 'AGCAGCACTTGT': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [0, 2]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def test_assign_seqs_fasta_only(self):\r\n\r\n # Initial test for single fasta file alone.\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = []\r\n #file_data['mapping_file'] = self.valid_mapping_data_golay_upper\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = \">s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n\"\r\n\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 1, 'AACTCGTCGATG,s1': 1,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'AACTCGTCGATG': 1, 'AGCAGCACTTGT': 1,\r\n 'ACCGCAGAGTCA': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def modify_SEQ(self, seq_in, cigar_list_in):\n seq = seq_in[:] # Make a copy.\n cigar_list = cigar_list_in[:]\n # Placeholder for the new sequence.\n new_seq = \"\"\n for item in cigar_list:\n # Number of operations.\n num = int(item[:-1])\n # Operation.\n letter = item[-1]\n if letter == \"M\" and num == len(seq_in):\n return seq_in\n if True:\n # Matches or mismatches.\n if letter in [\"M\", \"X\"]:\n new_seq += seq[:num]\n seq = seq[num:]\n\n # Hard-clips or skipped regions.\n elif letter in [\"H\", \"N\"]:\n seq = seq[num:]\n new_seq += num * \" \"\n # Deletions.\n elif letter == \"D\":\n seq = seq[num:]\n new_seq += num * \"~\"\n # Paddings, insertions, 
soft-clips.\n elif letter in [\"P\", \"I\", \"S\"]:\n seq = seq[num:]\n # Sequence match.\n elif letter == \"=\":\n new_seq = seq\n\n return new_seq", "def reassemble(self, seq, buf):\n # XXX - fastpath properly sequenced data.\n if seq == self.cur and not self.q:\n self.cur += len(buf)\n return buf\n # XXX - favor newer data\n heapq.heappush(self.q, (seq, buf))\n l = []\n while self.q:\n if self.q[0][0] <= self.cur:\n seq, buf = heapq.heappop(self.q)\n if seq != self.cur:\n # Reverse overlap. Trim left (empty string on rexmit)...\n buf = buf[self.cur-seq:]\n l.append(buf)\n self.cur += len(buf)\n else:\n break\n return ''.join(l)", "def test_assign_seqs_exceeds_error_correction_unassigned(self):\r\n\r\n # Handles single fasta and single qual, disabled bc correction,\r\n # writes unassigned sequences, retains barcodes, starts enumeration\r\n # at 1000, generic 12 base pair barcode type.\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_with_bc_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n file_data['unassigned_seqs_f'] = FakeOutFile()\r\n file_data['unassigned_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = True\r\n barcode_type = 12\r\n max_bc_errors = 1.5\r\n start_index = 1000\r\n write_unassigned_reads = True\r\n disable_bc_correction = True\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s2_1002 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nAGCAGCACTTGTGACCGATTACGATAACG\\n'\r\n expected_demultiplexed_qual_seq = '>s2_1002 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n30 27 11 16 30 19 13 19 16 15 24 12 10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n expected_unassigned_fasta_seq = '>Unassigned_1000 ABCD0001 orig_bc=TACTCGTCGATG new_bc=None bc_diffs=0\\nTACTCGTCGATGCAGGACGAGACGAGGTT\\n>Unassigned_1001 EFGH0002 orig_bc=GCCGCAGAGTCA new_bc=None bc_diffs=0\\nGCCGCAGAGTCACCAGATTACGAGATTA\\n'\r\n expected_unassigned_qual_seq = '>Unassigned_1000 ABCD0001 orig_bc=TACTCGTCGATG new_bc=None bc_diffs=0\\n29 13 24 14 10 14 16 13 30 10 13 11 30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>Unassigned_1001 EFGH0002 orig_bc=GCCGCAGAGTCA new_bc=None bc_diffs=0\\n13 22 15 12 10 14 23 13 25 22 15 20 12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n'\r\n self.assertEqual(file_data['unassigned_seqs_f'].data,\r\n expected_unassigned_fasta_seq)\r\n self.assertEqual(file_data['unassigned_qual_f'].data,\r\n expected_unassigned_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 0, 'AACTCGTCGATG,s1': 0,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'TACTCGTCGATG': 1, 'GCCGCAGAGTCA': 1,\r\n 'AGCAGCACTTGT': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, 
expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def test_assign_seqs_two_fastas_quals(self):\r\n\r\n # Handles single fasta and single qual\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors,\r\n self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors,\r\n self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n expected_demultiplexed_qual_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 2, 'AACTCGTCGATG,s1': 2,\r\n 'AGCAGCACTTGT,s2': 2}\r\n expected_bc_freqs = {'AACTCGTCGATG': 2, 'AGCAGCACTTGT': 2,\r\n 'ACCGCAGAGTCA': 2}\r\n expected_seq_counts = 6\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def modifyends(dna, left=\"\", right=\"\", add=0, add_right=0, add_left=0, supfeature=False, product=None, process_name=None, \n process_description=None, pn=None, pd=None, quinable=True, **kwargs):\n\n 
kwargs.setdefault(\"_sourcefile\", None) \n kwargs.setdefault(\"process_id\", None)\n kwargs.setdefault(\"original_ids\", []) \n _sourcefile = kwargs[\"_sourcefile\"] \n process_id = kwargs[\"process_id\"] \n original_ids = kwargs[\"original_ids\"]\n\n if dna._ssdna == True:\n raise TypeError(\"ssDNA object cannot be processed by `modifydna` function\") \n \n project = None\n project = project if product is None else product\n process_name = pn if process_name is None else process_name\n process_description = pd if process_description is None else process_description\n\n if dna.topology == \"circular\":\n raise ValueError(\"End sequence structures cannot be modified. The topology of the given QUEEN_object is circular.\") \n else:\n pass\n \n def parse(seq,count=0):\n l_bracket = re.compile(\"\\(\")\n r_bracket = re.compile(\"\\)\")\n l_brace = re.compile(\"\\{\")\n r_brace = re.compile(\"\\}\")\n if set(str(seq)) <= set(\"0123456789ATGCRYKMSWBDHVNatgcnrykmswbdhv{}()/-*\"):\n lbk_list = [l.start() for l in re.finditer(l_bracket,seq)]\n lbc_list = [l.start() for l in re.finditer(l_brace,seq)]\n rbc_list = [l.start() for l in re.finditer(r_brace,seq)]\n rbk_list = []\n for bk in lbk_list:\n n = 0\n p = bk\n for c in seq[bk:]:\n if c == \"(\":\n n += 1\n elif c == \")\":\n n -= 1\n if n == 0:\n rbk_list.append(p)\n break\n p += 1\n \n if len(lbk_list) == len(rbk_list):\n bk_set = list(zip(lbk_list,rbk_list))\n bc_set = list(zip(lbc_list,rbc_list))\n if len(bk_set) > 0:\n for bks in bk_set:\n new_seq = seq[bks[0]+1:bks[1]] \n if \"(\" not in new_seq: \n num = 0\n flag = 0\n sub_lbc_list = [l.start() for l in re.finditer(l_brace,new_seq)]\n sub_rbc_list = [l.start() for l in re.finditer(r_brace,new_seq)]\n sub_bc_set = list(zip(sub_lbc_list, sub_rbc_list))\n new_new_seq = new_seq\n sub_bc_set.sort()\n for bcs in sub_bc_set:\n try:\n num = int(new_seq[bcs[0]+1:bcs[1]])\n except:\n return False\n if num > 0:\n new_new_seq = new_new_seq[:new_new_seq.find(\"{\")-1] + new_seq[bcs[0]-1] * num + new_seq[bcs[1]+1:] \n else:\n pass\n new_seq = new_new_seq\n num = 0\n for bcs in bc_set:\n if bcs[0] == bks[1]+1:\n try:\n num = int(seq[bcs[0]+1:bcs[1]])\n except:\n return False\n break\n else:\n pass\n if num > 0:\n new_seq = new_seq * num\n else:\n pass\n break\n else:\n pass\n new_seq = seq[:bks[0]] + new_seq + seq[bcs[1]+1:]\n \n else:\n new_seq = seq\n bc_set.sort()\n for bcs in bc_set:\n try:\n num = int(seq[bcs[0]+1:bcs[1]])\n except:\n return False\n if num > 0:\n new_seq = new_seq[:new_seq.find(\"{\")-1] + seq[bcs[0]-1] * num + seq[bcs[1]+1:] \n else:\n pass\n \n if set(str(new_seq)) <= set(\"ATGCRYKMSWBDHVNatgcnrykmswbdhv/-*\"):\n pass\n else:\n new_seq = parse(new_seq,count+1)\n return new_seq\n else:\n return False\n else:\n return False\n \n def check_endseq(top, bottom):\n #Check if the top strand seqeunce is complement with the bottom strand. 
\n new_top = \"\"\n new_bottom = \"\"\n for t,b in zip(top,bottom):\n if t != b.translate(str.maketrans(\"ATGCRYKMSWBDHV\",\"TACGYRMKWSVHDB\")) and (t != \"-\" and b != \"-\"):\n return False, False\n new_top += t\n new_bottom += b\n return new_top, new_bottom\n \n if left == \"\":\n left = \"*/*\"\n \n if right == \"\":\n right = \"*/*\"\n \n left_origin, right_origin = left, right\n left, right = parse(left.upper()), parse(right.upper())\n left, rihgt = str(left), str(right) \n pattern1, pattern2, patternl1, patternl2, patternr1, patternr2 = \"[ATGCRYKMSWBDHVN*-]*/?[ATGCRYKMSWBDHVN*-]*\", \"[ATGCRYKMSWBDHVN*]+-+[ATGCRYKMSWBDHVN*]+\", \"^[ATGCRYKMSWBDHVN*]+-+/\", \"/[ATGCRYKMSWBDHVN*]+-+$\", \"^-+[ATGCRYKMSWBDHVN*]+/\", \"/-+[ATGCRYKMSWBDHVN*]+$\" \n pattern1 = re.compile(pattern1) \n pattern2 = re.compile(pattern2) \n patternl1 = re.compile(patternl1)\n patternl2 = re.compile(patternl2)\n patternr1 = re.compile(patternr1)\n patternr2 = re.compile(patternr2)\n left_end, right_end = left, right\n \n if pattern1.fullmatch(left_end) != None and pattern2.search(left_end) is None and patternl1.search(left_end) is None and patternl2.search(left_end) is None:\n pass \n else:\n raise TypeError(\"Please sepcify a proper sequence pattern for the 'left' argument\") \n \n if pattern1.fullmatch(right_end) != None and pattern2.search(right_end) is None and patternr1.search(right_end) is None and patternr2.search(right_end) is None:\n pass\n else:\n raise TypeError(\"Please sepcify a proper sequence pattern for the 'right' argument\") \n \n if \"/\" in left_end:\n left_end_top, left_end_bottom = left_end.split(\"/\")\n else:\n add = 1\n add_left = 1\n left_end_top, left_end_bottom = left_end, left_end.translate(str.maketrans(\"ATGCRYKMSWBDHV\",\"TACGYRMKWSVHDB\"))\n left_end = left_end_top + \"/\" + left_end_bottom\n\n if len(left_end_top) != len(left_end_bottom):\n raise ValueError(\"Please specify a proper end sequence structure for the 'left' argument.\")\n \n elif \"-\" in left_end or \"*\" in left_end:\n left_end_top, left_end_bottom = check_endseq(left_end_top, left_end_bottom)\n if left_end_top != False:\n left_end_length = len(left_end_top)\n if \"*\" in left_end_top or \"*\" in left_end_bottom:\n if set(left_end_top) <= set([\"*\",\"-\"]) and set(left_end_bottom) <= set([\"*\",\"-\"]):\n left_end_top = \"\".join([s if q != \"-\" else \"-\" for s,q in zip(dna.seq[:left_end_length], left_end_top)])\n left_end_bottom = \"\".join([s if q != \"-\" else \"-\" for s,q in zip(dna.seq[:left_end_length].translate(str.maketrans(\"ATGCRYKMSWBDHV\",\"TACGYRMKWSVHDB\")), left_end_bottom)])\n else:\n raise ValueError(\"'*' cannot be used wih IUPAC nucleotide codes\")\n \n left_end_top, left_end_bottom = left_end_top.replace(\"-\",\"\"), left_end_bottom.replace(\"-\",\"\") \n if len(left_end_top) < len(left_end_bottom):\n left_length = len(left_end_bottom)\n left_end = left_end_bottom[0:len(left_end_bottom)-1*len(left_end_top)].translate(str.maketrans(\"ATGCRYKMSWBDHV\",\"TACGYRMKWSVHDB\")) \n left_end_top = -1\n left_end_bottom = 1\n\n elif len(left_end_top) > len(left_end_bottom):\n left_length = len(left_end_top)\n left_end = left_end_top[0:len(left_end_top)-1*len(left_end_bottom)]\n left_end_top = 1\n left_end_bottom = -1\n \n else:\n left_length = len(left_end_top)\n left_end = left_end_top[0:len(left_end_top)-1*len(left_end_bottom)]\n left_end_top = 1\n left_end_bottom = 1\n else:\n add = 1\n add_left = 1\n left_end = left_end_top + \"/\" + left_end_bottom\n\n else:\n raise ValueError(\"Please specify a 
proper end sequence structure for the 'left' argument.\")\n else:\n left_end_length = len(left_end_top) \n left_length = len(left_end_top)\n left_end_top = 1\n left_end_bottom = 1\n\n if \"/\" in right_end:\n right_end_top, right_end_bottom = right_end.split(\"/\")\n else:\n add = 1\n add_right = 1 \n right_end_top, right_end_bottom = right_end, right_end.translate(str.maketrans(\"ATGCRYKMSWBDHV\",\"TACGYRMKWSVHDB\"))\n right_end = right_end_top + \"/\" + right_end_bottom\n \n if len(right_end_top) != len(right_end_bottom):\n raise ValueError(\"Please specify a proper end sequence structure for the 'right' argument.\")\n \n elif \"-\" in right_end or \"*\" in right_end:\n right_end_top, right_end_bottom = check_endseq(right_end_top, right_end_bottom)\n if right_end_top != False:\n right_end_length = len(right_end_top)\n if \"*\" in right_end_top or \"*\" in right_end_bottom:\n if set(right_end_top) <= set([\"*\",\"-\"]) and set(right_end_bottom) <= set([\"*\",\"-\"]):\n right_end_top = \"\".join([s if q != \"-\" else \"-\" for s,q in zip(dna.seq[-1*right_end_length:], right_end_top)])\n right_end_bottom = \"\".join([s if q != \"-\" else \"-\" for s,q in zip(dna.seq[-1*right_end_length:].translate(str.maketrans(\"ATGCRYKMSWBDHV\",\"TACGYRMKWSVHDB\")), right_end_bottom)])\n else:\n raise ValueError(\"'*' cannot be used wih characters for DNA sequence.\")\n\n right_end_top, right_end_bottom = right_end_top.replace(\"-\",\"\"), right_end_bottom.replace(\"-\",\"\") \n if len(right_end_top) < len(right_end_bottom):\n right_length = len(right_end_bottom)\n right_end = right_end_bottom[len(right_end_top):].translate(str.maketrans(\"ATGCRYKMSWBDHV\",\"TACGYRMKWSVHDB\")) \n right_end_top = -1\n right_end_bottom = 1\n\n elif len(right_end_top) > len(right_end_bottom):\n right_length = len(right_end_top)\n right_end = right_end_top[len(right_end_bottom):] \n right_end_top = 1\n right_end_bottom = -1\n \n else:\n right_length = len(right_end_top)\n right_end = right_end_top[len(right_end_bottom):] \n right_end_top = 1\n right_end_bottom = 1\n else:\n add = 1\n add_right = 1 \n right_end = right_end_top + \"/\" + right_end_bottom\n\n else:\n raise ValueError(\"Please specify a proper end sequence structure for the 'right' argument.\")\n else:\n right_end_length = len(right_end_top) \n right_length = len(right_end_top) \n right_end_top = 1\n right_end_bottom = 1\n \n if add == 1 or (left_end != dna.seq[left_end_length-left_length:left_end_length-left_length+len(left_end)] \n or right_end != str(dna[len(dna.seq)-right_end_length + right_length - len(right_end):len(dna.seq)-right_end_length + right_length].seq)):\n \n if add_left == 1 and add_right == 1:\n new_dna = dna.__class__(seq=left_end.split(\"/\")[0] + dna.seq + right_end.split(\"/\")[0] + \"/\" \n + left_end.split(\"/\")[1] + dna.seq.translate(str.maketrans(\"ATGCRYKMSWBDHV\",\"TACGYRMKWSVHDB\")) + right_end.split(\"/\")[1], quinable=0) \n new_dna._dnafeatures = _slide(dna.dnafeatures, len(left_end.split(\"/\")[0])) \n new_dna.record = copy.copy(dna.record) \n new_dna.record.features = new_dna.dnafeatures\n new_dna._positions = (-1,) * len(left_end.split(\"/\")[0]) + new_dna._positions + (-1,) * len(right_end.split(\"/\")[0])\n \n \n elif add_right == 1:\n left_end = dna.__class__(seq=left_end, quinable=0) \n right_end = dna.__class__(seq=right_end, quinalbe=0) \n new_dna = cropdna(dna, start=left_end_length-left_length, end=len(dna.seq), quinable=0) + right_end\n new_dna._right_end = right_end._right_end\n new_dna._left_end = left_end.seq\n 
new_dna._left_end_top = left_end_top \n new_dna._left_end_bottom = left_end_bottom\n new_dna._positions = new_dna._positions + (-1,) * len(right_end.seq)\n \n else:\n left_end = dna.__class__(seq=left_end, quinable=0) \n right_end = dna.__class__(seq=right_end, quinable=0) \n new_dna = left_end + cropdna(dna, start=0, end=len(dna.seq)-right_end_length+right_length, quinable=0)\n new_dna._left_end = left_end._left_end\n new_dna._right_end = right_end.seq\n new_dna._right_end_top = right_end_top \n new_dna._right_end_bottom = right_end_bottom\n new_dna._positions = (-1,) * len(left_end.seq) + new_dna._positions\n \n if type(left_origin) == new_dna.seq.__class__ and left_origin.parental_class == \"QUEEN\": \n parental_id = left_origin.parental_id\n tmp_left = None\n if left_origin.name is not None: \n if left_origin.name == \"rcseq\":\n tmp_left = flipdna(left_origin.parent, quinable==0) \n else:\n tmp_left = None\n else:\n tmp_left = left_origin.parent\n \n if tmp_left is not None:\n new_dna._dnafeatures = tmp_left.dnafeatures + new_dna._dnafeatures\n\n if type(right_origin) == new_dna.seq.__class__ and right_origin.parental_class == \"QUEEN\": \n parental_id = right_origin.parental_id\n tmp_right = None\n if right_origin.name is not None: \n if right_origin.name == \"rcseq\":\n tmp_right = flipdna(right_origin.parent, quinable==0) \n else:\n tmp_right = None\n else:\n tmp_right = right_origin.parent\n\n if tmp_right is not None:\n new_dna._dnafeatures += _slide(tmp_right.dnafeatures, len(left_end.split(\"/\")[0]) + len(dna.seq))\n \n new_dna._history_feature = dna._history_feature \n \n #Recover fragmented features if complete sequence is in the construct.\n new_features = [] \n remove_features = [] \n for feat in new_dna.dnafeatures:\n if \"broken_feature\" in feat.qualifiers:\n note = feat.qualifiers[\"broken_feature\"][0]\n label = \":\".join(note.split(\":\")[:-1])\n poss, pose = list(map(int,note.split(\":\")[-1].split(\"..\")))\n length = int(note.split(\":\")[-4]) \n if feat.location.strand != -1:\n sfeat = feat.start-(poss-1) \n sfeat = sfeat if sfeat > 0 else len(new_dna.seq) + sfeat\n efeat = feat.end+(length-pose)\n else:\n sfeat = feat.start-(length-pose) \n sfeat = sfeat if sfeat > 0 else len(new_dna.seq) + sfeat\n efeat = feat.end+(poss-1) \n \n if note.split(\":\")[-3] == new_dna.printsequence(sfeat, efeat, strand=feat.location.strand):\n if sfeat < efeat:\n location = FeatureLocation(sfeat, efeat, feat.location.strand) \n else:\n location = CompoundLocation([FeatureLocation(sfeat, len(new_dna.seq)), FeatureLocation(0, efeat, feat.location.strand)]) \n newfeat = feat.__class__(location=location, subject=new_dna)\n newfeat.type = feat.type\n newfeat.qualifiers = feat.qualifiers\n del newfeat.qualifiers[\"broken_feature\"]\n newfeat._id = label.split(\":\")[1]\n new_features.append(newfeat)\n remove_features.append(feat)\n \n for feat in remove_features:\n del new_dna._dnafeatures[new_dna.dnafeatures.index(feat)] \n \n for feat in new_features:\n new_dna._dnafeatures.append(feat) \n else:\n new_dna = copy.deepcopy(dna) \n new_dna._left_end = left_end\n new_dna._right_end = right_end\n new_dna._left_end_top = left_end_top \n new_dna._left_end_bottom = left_end_bottom\n new_dna._right_end_top = right_end_top\n new_dna._right_end_bottom = right_end_bottom\n \n if project is None:\n new_dna._unique_id = dna._unique_id \n else:\n new_dna._unique_id = project\n \n if type(supfeature) in (tuple, list) and type(supfeature[0]) == dict:\n for feature_dict in supfeature: \n 
new_dna.setfeature(feature_dict) \n elif type(supfeature) == dict:\n new_dna.setfeature(supfeature)\n \n if quinable == True:\n args = [] \n history_features = [new_dna._history_feature] \n args.append(\"'{}'\".format(new_dna._unique_id))\n args.append(\"'{}'\".format(dna._unique_id)) \n ends = [] \n if type(left_origin) == new_dna.seq.__class__:\n if left_origin.parental_class == \"DNAFeature\":\n qkey = left_origin.qkey\n for qindex, qfeat in enumerate(new_dna.__class__.queried_features_dict[qkey]):\n if qfeat._second_id == left_origin.parental_id:\n break\n if type(left_origin.item) == int:\n args.append(\"QUEEN.queried_features_dict['{}'][{}].{}[{}]\".format(qkey, qindex, \"seq\" , left_origin.item))\n elif type(left_origin.item) == slice:\n sl_start = left_origin.item.start\n sl_stop = left_origin.item.stop \n sl_step = left_origin.item.step\n sl_start = \"\" if sl_start is None else sl_start\n sl_stop = \"\" if sl_stop is None else sl_stop\n if sl_step == 1 or sl_step == None:\n args.append(\"QUEEN.queried_features_dict['{}'][{}].seq[{}:{}]\".format(qkey, qindex, sl_start, sl_stop))\n else:\n args.append(\"QUEEN.queried_features_dict['{}'][{}].seq[{}:{}:{}]\".format(qkey, qindex, sl_start, sl_stop, sl_step))\n else:\n args.append(\"QUEEN.queried_features_dict['{}'][{}].seq\".format(qkey, qindex))\n history_features.append(left_origin.parent.subject._history_feature) \n\n elif left_origin.parental_class == \"QUEEN\": \n parental_id = left_origin.parental_id\n\n if left_origin.name != None: \n if \"printsequence\" in left_origin.name:\n if len(left_origin.name.split(\"_\")) == 2: \n seqname = \"QUEEN.dna_dict['{}'].printsequence(strand={})\".format(parental_id, left_origin.name.split(\"_\")[-1]) \n else:\n seqname = \"QUEEN.dna_dict['{}'].printsequence(start={}, end={}, strand={})\".format(parental_id, *left_origin.name.split(\"_\")[1:])\n elif lefet_origin.name == \"rcseq\":\n seqname = \"QUEEN.dna_dict['{}'].rcseq\".format(parental_id) \n else:\n seqname = \"QUEEN.dna_dict['{}'].seq\".format(parental_id)\n\n if type(left_origin.item) == int:\n args.append(\"{}.seq[{}]\".format(seqname, left_origin.item))\n elif type(left_origin.item) == slice:\n sl_start = left_origin.item.start\n sl_stop = left_origin.item.stop \n sl_step = left_origin.item.step\n sl_start = \"\" if sl_start is None else sl_start\n sl_stop = \"\" if sl_stop is None else sl_stop\n if sl_step == 1 or sl_step == None:\n args.append(\"{}[{}:{}]\".format(seqname, sl_start, sl_stop))\n else:\n args.append(\"{}[{}:{}:{}]\".format(seqname, sl_start, sl_stop, sl_step))\n else:\n args.append(\"{}\".format(seqname))\n history_features.append(left_origin.parent._history_feature) \n \n elif left_origin.parental_class == \"Cutsite\":\n if left_origin.parent.name not in cs.defaultkeys:\n cs.new_cutsites.append((left_origin.parent.name, left_origin.parent.cutsite)) \n args.append(\"cs.lib['{}'].{}\".format(left_origin.parent.name, left_origin.name)) \n else:\n args.append(\"'{}'\".format(left_origin)) \n ends.append(left) \n else:\n ends.append(left_origin) \n args.append(\"'{}'\".format(left_origin)) \n \n if type(right_origin) == new_dna.seq.__class__:\n if right_origin.parental_class == \"DNAFeature\":\n qkey = right_origin.qkey\n for qindex, qfeat in enumerate(new_dna.__class__.queried_features_dict[qkey]):\n if qfeat._second_id == right_origin.parental_id:\n break\n if type(right_origin.item) == int:\n args.append(\"QUEEN.queried_features_dict['{}'][{}].{}[{}]\".format(qkey, qindex, \"seq\" , right_origin.item))\n elif 
type(right_origin.item) == slice:\n sl_start = right_origin.item.start \n sl_stop = right_origin.item.stop \n sl_step = right_origin.item.step\n sl_start = \"\" if sl_start is None else sl_start\n sl_stop = \"\" if sl_stop is None else sl_stop\n if sl_step == 1 or sl_step == None:\n args.append(\"QUEEN.queried_features_dict['{}'][{}].seq[{}:{}]\".format(qkey, qindex, sl_start, sl_stop))\n else:\n args.append(\"QUEEN.queried_features_dict['{}'][{}].seq[{}:{}:{}]\".format(qkey, qindex, sl_start, sl_stop, sl_step))\n else:\n args.append(\"QUEEN.queried_features_dict['{}'][{}].seq\".format(qkey, qindex))\n history_features.append(right_origin.parent.subject._history_feature) \n\n elif right_origin.parental_class == \"QUEEN\": \n parental_id = right_origin.parental_id\n \n if right_origin.name != None: \n if \"printsequence\" in right_origin.name:\n if len(right_origin.name.split(\"_\")) == 2: \n seqname = \"QUEEN.dna_dict['{}'].printsequence(strand={})\".format(parental_id, right_origin.name.split(\"_\")[-1]) \n else:\n seqname = \"QUEEN.dna_dict['{}'].printsequence(start={}, end={}, strand={})\".format(parental_id, *right_origin.name.split(\"_\")[1:])\n elif right_origin.name == \"rcseq\":\n seqname = \"QUEEN.dna_dict['{}'].rcseq\".format(parental_id) \n else:\n seqname = \"QUEEN.dna_dict['{}'].seq\".format(parental_id)\n \n if type(right_origin.item) == int:\n args.append(\"{}[{}]\".format(seqname, right_origin.item))\n \n elif type(right_origin.item) == slice:\n sl_start = right_origin.item.start\n sl_stop = right_origin.item.stop \n sl_step = right_origin.item.step\n sl_start = \"\" if sl_start is None else sl_start\n sl_stop = \"\" if sl_stop is None else sl_stop\n if sl_step == 1 or sl_step == None:\n args.append(\"{}[{}:{}]\".format(seqname, sl_start, sl_stop))\n else:\n args.append(\"{}[{}:{}:{}]\".format(seqname, sl_start, sl_stop, sl_step))\n else:\n args.append(\"{}\".format(seqname))\n history_features.append(right_origin.parent._history_feature) \n \n elif right_origin.parental_class == \"Cutsite\":\n if right_origin.parent.name not in cs.defaultkeys:\n cs.new_cutsites.add((right_origin.parent.name, right_origin.parent.cutsite)) \n args.append(\"cs.lib['{}'].{}\".format(right_origin.parent.name, right_origin.name)) \n\n else:\n args.append(\"'{}'\".format(right_origin)) \n ends.append(right)\n else:\n ends.append(right_origin) \n args.append(\"'{}'\".format(right_origin)) \n\n project = \"\" \n fsupfeature = \"\" if supfeature == False else \", supfeature={}\".format(str(supfeature)) \n fproduct = \"\" if product is None else \", product='\" + product + \"'\"\n process_name = \"\" if process_name is None else \", process_name='\" + process_name + \"'\"\n process_description = \"\" if process_description is None else \", process_description='\" + process_description + \"'\" \n\n new_dna._product_id = new_dna._unique_id if product is None else product \n building_history = \"QUEEN.dna_dict['{}'] = modifyends(QUEEN.dna_dict['{}'], left={}, right={}{}{}{}{}{})\".format(new_dna._product_id, dna._product_id, args[2], args[3], fsupfeature, project, fproduct, process_name, process_description) \n history_feature = _combine_history(new_dna, history_features) \n new_dna._history_feature = history_feature\n process_id, original_ids = make_processid(new_dna, building_history, process_id, original_ids)\n add_history(new_dna, [building_history, \"left: {}; right: {}; leftobj: {}; rightobj: {}\".format(*ends, args[2], args[3]), \",\".join([process_id] + original_ids)], _sourcefile) \n 
new_dna._check_uniqueness()\n else:\n new_dna.__dict__[\"_product_id\"] = dna._product_id if \"_product_id\" in dna.__dict__ else dna._unique_id\n\n for dnafeature in new_dna.dnafeatures:\n dnafeature.subject = new_dna\n \n if product is None:\n pass \n else:\n product = product.replace(\" \",\"\")\n match = re.fullmatch(\"(.+)\\[(.+)\\]\", product)\n if match:\n if match.group(2).isdecimal() == True:\n new_dna.__class__._namespace[match.group(1)][int(match.group(2))] = new_dna\n else:\n new_dna.__class__._namespace[match.group(1)][match.group(2)] = new_dna\n else: \n new_dna.__class__._namespace[product] = new_dna\n return new_dna", "def correctfasta(vectint, records):\n\n\n# go through each sequence in genome file\n for record in records:\n if record in vectint:\n # We have the remove keyword. Do not process sequence record\n recordseq = records[record]\n if \"remove\" in vectint[record]:\n continue\n if \"trim3\" in vectint[record]:\n # We cannot work directly on the records hash\n # duplicate the sequence, and modify it\n recordseq = recordseq[:vectint[record][\"trim3\"]]\n if \"trim5\" in vectint[record]:\n # We cannot work directly on the records hash\n # duplicate the sequence, and modify it\n recordseq = recordseq[vectint[record][\"trim5\"]:]\n # print modified sequence\n if len(recordseq.seq) > 0:\n print(\">\"+record)\n print(recordseq.seq)\n else:\n # print unmodified sequence\n print(\">\"+record)\n print(records[record].seq)", "def get_mutated_sequence(focus_seq, mutant, start_idx=1, AA_vocab=AA_vocab):\n mutated_seq = list(focus_seq)\n for mutation in mutant.split(\":\"):\n try:\n from_AA, position, to_AA = mutation[0], int(mutation[1:-1]), mutation[-1]\n except:\n print(\"Issue with mutant: \"+str(mutation))\n relative_position = position - start_idx\n assert (from_AA==focus_seq[relative_position]), \"Invalid from_AA or mutant position: \"+str(mutation)+\" from_AA: \"+str(from_AA) + \" relative pos: \"+str(relative_position) + \" focus_seq: \"+str(focus_seq)\n assert (to_AA in AA_vocab) , \"Mutant to_AA is invalid: \"+str(mutation)\n mutated_seq[relative_position] = to_AA\n return \"\".join(mutated_seq)", "def test_update_program_multiple(self):\n\n program = [7, 2, 10, 4, 8, 7, 0]\n substr = pyCompressor.CandidateSubr(3, (0, 1))\n substr._position = 5\n substr._global = True\n substr2 = pyCompressor.CandidateSubr(2, (0, 5))\n substr2._position = 21\n substr2._global = True\n encoding = [(1, substr), (5, substr2)]\n bias = 0\n\n self.empty_compreffor.update_program(program, encoding, bias, [bias], 0)\n\n self.assertEqual(program, [7, 5, \"callgsubr\", 8, 21, \"callgsubr\"])", "def fix_seq(self, fixed_seq):\n assert len(fixed_seq) == self.length, \\\n \"Length of fixed sequence (%d) does not match length of %s (%d)\" \\\n % (len(fixed_seq), self.full_name, self.length)\n i = 0\n for seq in self.seqs:\n seq.fix_seq( fixed_seq[i:i+seq.length] )\n i += seq.length", "def Mutation(self, state):\n changed = False;\n #-------------------------------------------------------\n # MUTATE CONDITION\n #-------------------------------------------------------\n for att in range(cons.env.format_data.numb_attributes): #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n if random.random() < cons.mu and state[att] != cons.label_missing_data:\n #MUTATION--------------------------------------------------------------------------------------------------------------\n if att not in 
self.specified_attributes: #Attribute not yet specified\n self.specified_attributes.append(att)\n self.condition.append(self.buildMatch(att, state)) #buildMatch handles both discrete and continuous attributes\n changed = True\n\n elif att in self.specified_attributes: #Attribute already specified\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n #-------------------------------------------------------\n # DISCRETE OR CONTINUOUS ATTRIBUTE - remove attribute specification with 50% chance if we have continuous attribute, or 100% if discrete attribute.\n #-------------------------------------------------------\n if not att_info[0] or random.random() > 0.5:\n self.specified_attributes.remove(att)\n self.condition.pop(i) #buildMatch handles both discrete and continuous attributes\n changed = True\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE - (mutate range with 50% probability vs. removing specification of this attribute all together)\n #-------------------------------------------------------\n else:\n #Mutate continuous range - based on Bacardit 2009 - Select one bound with uniform probability and add or subtract a randomly generated offset to bound, of size between 0 and 50% of att domain.\n attribute_range = float(att_info[1][1]) - float(att_info[1][0])\n mutate_range = random.random()*0.5*attribute_range\n if random.random() > 0.5: #Mutate minimum\n if random.random() > 0.5: #Add\n self.condition[i][0] += mutate_range\n else: #Subtract\n self.condition[i][0] -= mutate_range\n else: #Mutate maximum\n if random.random() > 0.5: #Add\n self.condition[i][1] += mutate_range\n else: #Subtract\n self.condition[i][1] -= mutate_range\n\n #Repair range - such that min specified first, and max second.\n self.condition[i].sort()\n changed = True\n #-------------------------------------------------------\n # NO MUTATION OCCURS\n #-------------------------------------------------------\n else:\n pass\n #-------------------------------------------------------\n # MUTATE PHENOTYPE\n #-------------------------------------------------------\n if cons.env.format_data.discrete_action:\n nowchanged = self.discreteActionMutation()\n #else:\n # nowChanged = self.continuousPhenotypeMutation(phenotype)\n\n if changed or nowchanged:\n return True", "def SingleQubitIRB_AC(qubit: qreg, seqFile):\n\n # Original:\n # # Setup a pulse library\n # pulseLib = [AC(qubit, cliffNum) for cliffNum in range(24)]\n # pulseLib.append(pulseLib[0])\n # measBlock = MEAS(qubit)\n\n # with open(seqFile,'r') as FID:\n # fileReader = reader(FID)\n # seqs = []\n # for pulseSeqStr in fileReader:\n # seq = []\n # for pulseStr in pulseSeqStr:\n # seq.append(pulseLib[int(pulseStr)])\n # seq.append(measBlock)\n # seqs.append(seq)\n\n # # Hack for limited APS waveform memory and break it up into multiple files\n # # We've shuffled the sequences so that we loop through each gate length on the inner loop\n # numRandomizations = 36\n # for ct in range(numRandomizations):\n # chunk = seqs[ct::numRandomizations]\n # chunk1 = chunk[::2]\n # chunk2 = chunk[1::2]\n # # Tack on the calibration scalings\n # chunk1 += [[Id(qubit), measBlock], [X(qubit), measBlock]]\n # fileNames = compile_to_hardware(chunk1, 'RB/RB', suffix='_{0}'.format(2*ct+1))\n # chunk2 += [[Id(qubit), measBlock], [X(qubit), measBlock]]\n # fileNames = compile_to_hardware(chunk2, 'RB/RB', suffix='_{0}'.format(2*ct+2))\n\n # Issue #54:\n # FIXME: If the helper here raises an error, we 
get a QGL2 compiler error like:\n # error: ast eval failure [readSeqFile(seqFile)]: type <class 'ValueError'> Missing file of sequences\n # error: failed to evaluate assignment [pulseSeqStrs___ass_006 = readSeqFile(seqFile)]\n pulseSeqStrs = readSeqFile(seqFile)\n numSeqs = len(pulseSeqStrs)\n\n # Hack for limited APS waveform memory and break it up into multiple files\n # We've shuffled the sequences so that we loop through each gate length on the inner loop\n numRandomizations = 36\n fileNames = []\n for ct in range(numRandomizations):\n doCt = ct\n isOne = True\n while doCt < numSeqs:\n getPulseSeq(qubit, pulseSeqStrs[doCt])\n\n # Tack on calibration scalings\n if isOne:\n init(qubit)\n Id(qubit)\n MEAS(qubit)\n init(qubit)\n X(qubit)\n MEAS(qubit)\n else:\n init(qubit)\n Id(qubit)\n MEAS(qubit)\n init(qubit)\n X(qubit)\n MEAS(qubit)\n\n # Now write these sequences\n # FIXME: Then magically get the sequences here....\n # This needs to get refactored....\n # We need to split creating seqs from c_to_h\n# fileNames = compile_to_hardware([], 'RB/RB',\n# suffix='_{0}'.format(2*ct+1+1*(not\n# isOne)),\n# qgl2=True)\n\n doCt += numRandomizations\n isOne = not isOne", "def shrink_seq(mrnaseq, mrna_frag, mrna_frag_target, total_length=50):\n # Prepare sequences with no gaps\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n if len(mrna_frag_nogap) < total_length:\n syserr(mrna_frag_nogap)\n syserr(mrnaseq)\n syserr(mrna_frag)\n syserr(mrna_frag_target)\n raise Exception(\n \"Check your sequences maybe you should extend, not shrink them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean(\n [i for i, x in enumerate(mrna_frag_target) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag_target))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the shrinkage of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length < total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = abs(total_length - length)\n quot = dif // 2 # this is explicit integer division\n l_ext = li + quot\n u_ext = ui - (dif - quot)\n if (u_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # trim left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li + dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li:ui - dif]", "def test_assign_seqs_two_fastas(self):\r\n\r\n # Handles two fasta files alone\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors,\r\n self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = []\r\n #file_data['mapping_file'] = self.valid_mapping_data_golay_upper\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = 
False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 2, 'AACTCGTCGATG,s1': 2,\r\n 'AGCAGCACTTGT,s2': 2}\r\n expected_bc_freqs = {'AACTCGTCGATG': 2, 'AGCAGCACTTGT': 2,\r\n 'ACCGCAGAGTCA': 2}\r\n expected_seq_counts = 6\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def change_biopython_record_sequence(record, new_seq):\n new_record = deepcopy(record)\n\n if has_dna_alphabet:\n seq = Seq(new_seq, alphabet=DNAAlphabet())\n else:\n seq = Seq(new_seq)\n\n new_record.seq = seq\n return new_record", "def batmod_ac(d, _dt, _soc0, _soc, _Pr, _Pbs0, _Pbs, _Pbat):\r\n # Loading of particular variables\r\n _E_BAT = d[0]\r\n _eta_BAT = d[1]\r\n _t_CONSTANT = d[2]\r\n _P_SYS_SOC0_DC = d[3]\r\n _P_SYS_SOC0_AC = d[4]\r\n _P_SYS_SOC1_DC = d[5]\r\n _P_SYS_SOC1_AC = d[6]\r\n _AC2BAT_a_in = d[7]\r\n _AC2BAT_b_in = d[8]\r\n _AC2BAT_c_in = d[9]\r\n _BAT2AC_a_out = d[10]\r\n _BAT2AC_b_out = d[11]\r\n _BAT2AC_c_out = d[12]\r\n _P_AC2BAT_DEV = d[13]\r\n _P_BAT2AC_DEV = d[14]\r\n _P_BAT2AC_out = d[15]\r\n _P_AC2BAT_in = d[16]\r\n _t_DEAD = int(round(d[17]))\r\n _SOC_h = d[18]\r\n\r\n _P_AC2BAT_min = _AC2BAT_c_in\r\n _P_BAT2AC_min = _BAT2AC_c_out\r\n\r\n # Correction factor to avoid over charge and discharge the battery\r\n corr = 0.1\r\n\r\n # Initialization of particular variables\r\n\r\n _tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element\r\n # Factor of the first-order time delay element\r\n _ftde = 1 - np.exp(-_dt / _t_CONSTANT)\r\n\r\n # First time step with regard to the dead time of the system control\r\n _tstart = np.maximum(2, 1 + _t_DEAD)\r\n _tend = int(_Pr.size)\r\n _th = 0\r\n\r\n # Capacity of the battery, conversion from kWh to Wh\r\n _E_BAT *= 1000\r\n\r\n # Effiency of the battery in percent\r\n _eta_BAT /= 100\r\n\r\n # Check if the dead or settling time can be ignored and set flags accordingly\r\n if _dt >= (3 * _t_CONSTANT) or _tend == 1:\r\n _tstart = 1\r\n T_DEAD = False\r\n else:\r\n T_DEAD = True\r\n\r\n if _dt >= _t_DEAD + 3 * _t_CONSTANT:\r\n SETTLING = False\r\n else:\r\n SETTLING = True\r\n\r\n for t in range(_tstart - 1, _tend):\r\n\r\n # Energy content of the battery in the previous time step\r\n E_b0 = _soc0 * _E_BAT\r\n\r\n # Calculate the AC power of the battery system from the residual power\r\n # with regard to the dead time of 
the system control\r\n if T_DEAD:\r\n P_bs = _Pr[t - _t_DEAD]\r\n else:\r\n P_bs = _Pr[t]\r\n\r\n # Check if the battery holds enough unused capacity for charging or discharging\r\n # Estimated amount of energy in Wh that is supplied to or discharged from the storage unit.\r\n E_bs_est = P_bs * _dt / 3600\r\n\r\n # Reduce P_bs to avoid over charging of the battery\r\n if E_bs_est > 0 and E_bs_est > (_E_BAT - E_b0):\r\n P_bs = (_E_BAT - E_b0) * 3600 / _dt\r\n # When discharging take the correction factor into account\r\n elif E_bs_est < 0 and np.abs(E_bs_est) > (E_b0):\r\n P_bs = -((E_b0 * 3600 / _dt) * (1-corr))\r\n\r\n # Adjust the AC power of the battery system due to the stationary\r\n # deviations taking the minimum charging and discharging power into\r\n # account\r\n if P_bs > _P_AC2BAT_min:\r\n P_bs = np.maximum(_P_AC2BAT_min, P_bs + _P_AC2BAT_DEV)\r\n\r\n elif P_bs < -_P_BAT2AC_min:\r\n P_bs = np.minimum(-_P_BAT2AC_min, P_bs - _P_BAT2AC_DEV)\r\n\r\n else:\r\n P_bs = 0\r\n\r\n # Limit the AC power of the battery system to the rated power of the\r\n # battery converter\r\n P_bs = np.maximum(-_P_BAT2AC_out * 1000,\r\n np.minimum(_P_AC2BAT_in * 1000, P_bs))\r\n\r\n # Adjust the AC power of the battery system due to the settling time\r\n # (modeled by a first-order time delay element) Hier hat der Schritt vorher eine Null?\r\n # Muss der vorherige Wert mit übergeben werden?\r\n if SETTLING:\r\n if t > 0:\r\n P_bs = _tde * _Pbs[t-1] + _tde * (P_bs - _Pbs[t-1]) * _ftde + P_bs * (not _tde)\r\n else:\r\n P_bs = _tde * _Pbs0 + _tde * (P_bs - _Pbs0) * _ftde + P_bs * (not _tde)\r\n\r\n # Decision if the battery should be charged or discharged\r\n if P_bs > 0 and _soc0 < 1 - _th * (1 - _SOC_h):\r\n # The last term th*(1-SOC_h) avoids the alternation between\r\n # charging and standby mode due to the DC power consumption of the\r\n # battery converter when the battery is fully charged. 
The battery\r\n # will not be recharged until the SOC falls below the SOC-threshold\r\n # (SOC_h) for recharging from PV.\r\n\r\n # Normalized AC power of the battery system\r\n p_bs = P_bs / _P_AC2BAT_in / 1000\r\n\r\n # DC power of the battery affected by the AC2BAT conversion losses\r\n # of the battery converter\r\n P_bat = np.maximum(\r\n 0, P_bs - (_AC2BAT_a_in * p_bs * p_bs + _AC2BAT_b_in * p_bs + _AC2BAT_c_in))\r\n\r\n elif P_bs < 0 and _soc0 > 0:\r\n\r\n # Normalized AC power of the battery system\r\n p_bs = np.abs(P_bs / _P_BAT2AC_out / 1000)\r\n\r\n # DC power of the battery affected by the BAT2AC conversion losses\r\n # of the battery converter\r\n P_bat = P_bs - (_BAT2AC_a_out * p_bs * p_bs +\r\n _BAT2AC_b_out * p_bs + _BAT2AC_c_out)\r\n\r\n else: # Neither charging nor discharging of the battery\r\n\r\n # Set the DC power of the battery to zero\r\n P_bat = 0\r\n\r\n # Decision if the standby mode is active\r\n if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state\r\n\r\n # DC and AC power consumption of the battery converter\r\n P_bat = -np.maximum(0, _P_SYS_SOC0_DC)\r\n P_bs = _P_SYS_SOC0_AC\r\n\r\n elif P_bat == 0 and _soc0 > 0: # Standby mode in fully charged state\r\n\r\n # DC and AC power consumption of the battery converter\r\n P_bat = -np.maximum(0, _P_SYS_SOC1_DC)\r\n P_bs = _P_SYS_SOC1_AC\r\n\r\n # Transfer the realized AC power of the battery system and\r\n # the DC power of the battery\r\n _Pbs0 = P_bs\r\n _Pbs[t] = P_bs\r\n _Pbat[t] = P_bat\r\n\r\n # Change the energy content of the battery from Ws to Wh conversion\r\n if P_bat > 0:\r\n E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600\r\n\r\n elif P_bat < 0:\r\n E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600\r\n\r\n else:\r\n E_b = E_b0\r\n\r\n # Calculate the state of charge of the battery\r\n _soc0 = E_b / _E_BAT\r\n _soc[t] = _soc0\r\n\r\n # Adjust the hysteresis threshold to avoid alternation\r\n # between charging and standby mode due to the DC power\r\n # consumption of the battery converter.\r\n if _th and _soc[t] > _SOC_h or _soc[t] > 1:\r\n _th = True\r\n else:\r\n _th = False\r\n\r\n return _Pbat, _Pbs, _soc, _soc0, _Pbs0", "async def _pairing_char_write(\n client: AIOHomeKitBleakClient,\n handle: BleakGATTCharacteristic,\n iid: int,\n request: list[tuple[TLV, bytes]],\n) -> dict[int, bytes]:\n buffer = bytearray()\n next_write = TLV.encode_list(request)\n\n for _ in range(MAX_REASSEMBLY):\n data = await char_write(client, None, None, handle, iid, next_write)\n decoded = dict(TLV.decode_bytearray(bytearray(data)))\n if TLV.kTLVType_FragmentLast in decoded:\n logger.debug(\"%s: Reassembling final fragment\", client.address)\n buffer.extend(decoded[TLV.kTLVType_FragmentLast])\n return dict(TLV.decode_bytes(buffer))\n elif TLV.kTLVType_FragmentData in decoded:\n logger.debug(\"%s: Reassembling fragment\", client.address)\n # There is more data, acknowledge the fragment\n # and keep reading\n buffer.extend(decoded[TLV.kTLVType_FragmentData])\n # Acknowledge the fragment\n # We must construct this manually since TLV.encode_bytes\n # current does not know how to encode a 0 length\n next_write = bytes([TLV.kTLVType_FragmentData, 0])\n else:\n return decoded\n\n raise ValueError(f\"Reassembly failed - too many fragments (max: {MAX_REASSEMBLY})\")", "def extend_taa_seq(self, taa_pos_base, old_seq, new_seq):\n taa_pos = None\n termlen = -1 # use -1 to detect abnormal computes\n seq_end = self.cds_end\n new_aa_seq = ''\n i = 0\n while True:\n ci = i*3\n old_codon_seq = old_seq[ci:ci+3]\n 
new_codon_seq = new_seq[ci:ci+3]\n # if sequence comes to ends, extend sequence from reference file\n if (old_codon_seq not in standard_codon_table or\n new_codon_seq not in standard_codon_table):\n seq_inc = faidx.refgenome.fetch_sequence(self.chrm, seq_end+1, seq_end+100)\n old_seq += seq_inc\n new_seq += seq_inc\n old_codon_seq = old_seq[ci:ci+3]\n new_codon_seq = new_seq[ci:ci+3]\n seq_end += 100\n\n taa_ref_run = codon2aa(old_codon_seq)\n taa_alt_run = codon2aa(new_codon_seq)\n new_aa_seq += taa_alt_run\n # print i, old_codon_seq, new_codon_seq, taa_ref_run, taa_alt_run\n if taa_pos == None and taa_ref_run != taa_alt_run:\n taa_pos = i\n taa_ref = taa_ref_run\n taa_alt = taa_alt_run\n if taa_alt_run == '*':\n if taa_pos == None:\n # stop codon encountered before difference\n return None # nothing occur to protein level\n termlen = i + 1 - taa_pos\n break\n i += 1\n\n new_aa_seq = new_aa_seq[taa_pos:]\n if taa_pos == None:\n print('oldseq', old_seq)\n print('newseq', new_seq)\n taa_pos += taa_pos_base\n\n aae = AAExtension()\n aae.taa_pos = taa_pos\n aae.taa_ref = taa_ref\n aae.taa_alt = taa_alt\n aae.termlen = termlen\n aae.new_aa_seq = new_aa_seq\n\n return aae", "def test_write_seqs_to_fasta(self):\r\n fd, output_fp = mkstemp(\r\n prefix=\"qiime_util_write_seqs_to_fasta_test\",\r\n suffix='.fasta')\r\n close(fd)\r\n self.files_to_remove.append(output_fp)\r\n seqs = [('s1', 'ACCGGTTGG'), ('s2', 'CCTTGG'),\r\n ('S4 some comment string', 'A')]\r\n exp = \">s1\\nACCGGTTGG\\n>s2\\nCCTTGG\\n>S4 some comment string\\nA\\n\"\r\n # works in write mode\r\n write_seqs_to_fasta(output_fp, seqs, 'w')\r\n self.assertEqual(open(output_fp).read(), exp)\r\n # calling again in write mode overwrites original file\r\n write_seqs_to_fasta(output_fp, seqs, 'w')\r\n self.assertEqual(open(output_fp).read(), exp)\r\n # works in append mode\r\n exp2 = exp + exp\r\n write_seqs_to_fasta(output_fp, seqs, 'a')\r\n self.assertEqual(open(output_fp).read(), exp2)" ]
[ "0.5245027", "0.5227622", "0.5217706", "0.5205439", "0.5075445", "0.5072723", "0.50680596", "0.50551826", "0.50075334", "0.49982312", "0.49953303", "0.49861366", "0.49375024", "0.4926492", "0.48596647", "0.48129907", "0.48101753", "0.4808212", "0.47136652", "0.46780968", "0.462251", "0.46223268", "0.46186623", "0.4612542", "0.46038774", "0.45797276", "0.45777217", "0.45669654", "0.45633814", "0.45581514" ]
0.6742125
0
Given a mother cell and the epitope present in the system, the function produces between zero and two daughter cells, depending on division success, and returns them as Bcell objects in a single list.
def divide(mother, AgEpitope, tnow, mut_list, RNs): dlist = [] # get new sequences, additional mutation counts and block status # for the daughters; mutations may happen during division ONLY if # the cell's family has been in the GC for long enough to have enough AID if ((tnow - mother.AIDstart) >= cf.tAID): # mutations can happen seq1, mutcount1, block1 = mutate_seq(mother.sequence[:], mother.block, RNs) seq2, mutcount2, block2 = mutate_seq(mother.sequence[:], mother.block, RNs) else: # mutational programme is not switched on yet (daughter=mother) seq1, mutcount1, block1 = mother.sequence[:], 0, mother.block seq2, mutcount2, block2 = mother.sequence[:], 0, mother.block num_muts = 0 num_ben = 0 # make new Bcell objects if sequences are okay if seq1 is not None: # if cell is blocked, affinity <= affinity0 if not block1: Emax = E_best(seq1, AgEpitope) else: Emax = min(E_best(mother.sequence0, AgEpitope), E_best(seq1, AgEpitope)) daughter1 = Bcell(sequence=seq1, sequence0=mother.sequence0[:], affinity=Emax, affinity0=mother.affinity0, origin=mother.origin, mutations=mother.mutations + mutcount1, family=mother.family, birthtime=mother.birthtime, GCentrytime=tnow, AIDstart=mother.AIDstart, block=block1) dlist.append(daughter1) # mutation counting num_muts += mutcount1 if Emax > mother.affinity: num_ben += 1 if seq2 is not None: # if cell is blocked, affinity <= affinity0 if not block2: Emax = E_best(seq2, AgEpitope) else: Emax = min(E_best(mother.sequence0, AgEpitope), E_best(seq2, AgEpitope)) daughter2 = Bcell(sequence=seq2, sequence0=mother.sequence0[:], affinity=Emax, affinity0=mother.affinity0, origin=mother.origin, mutations=mother.mutations + mutcount2, family=mother.family, birthtime=mother.birthtime, GCentrytime=tnow, AIDstart=mother.AIDstart, block=block2) dlist.append(daughter2) # mutation counting num_muts += mutcount2 if Emax > mother.affinity: num_ben += 1 mut_list.append((tnow, mother.family, num_muts, num_ben)) del mother return dlist, mut_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cell_division(waitlist, celllist, AgEpitope, tnow, mut_list, RNs):\n for cell in celllist:\n # get list of 0 to 2 daughters\n dlist, mut_list = divide(cell, AgEpitope, tnow, mut_list, RNs)\n # add daughters to waitlist\n waitlist = waitlist + dlist\n return waitlist, mut_list", "def test_split_cell_splits_neighbours(mock_amg):\n\n # split the centre cell in the mock grid\n # this will create 4 more cells at tier 1\n mock_amg.cells[4].split()\n\n # now split the bottom right of these cells\n # this should force the east and south cells to also be split\n mock_amg.cells[4].children['br'].split()\n\n assert mock_amg.cells[5].has_children\n assert mock_amg.cells[1].has_children", "def __ret_cell(self, cell):\n # try:\n # assert cell < self.length\n # except AssertionError:\n # print(\"ERROR: Cell value '%d' is larger than total size '%d'.\" % (cell, self.length-1))\n # sys.exit()\n # Next value is every self.row * N. Work backwards with int division.\n # a = cell // self.row % self.row\n # a = cell // self.row\n a = cell // self.col % self.row\n # Values are consequtive and wrap at self.col.\n b = cell % self.col\n return [a, b]", "def test_breed_certain_probability_all_in_cell():\n cell = topo.Jungle()\n for _ in range(100):\n cell.add_animal(animals.Herbivores(age=10, weight=100))\n cell.add_animal(animals.Carnivores(age=10, weight=100))\n cell.breed_all_animals_in_cell()\n assert len(cell.herbivore_list) == 200\n assert len(cell.carnivore_list) == 200", "def process_cell(self, neighbourhood: List[Cell], old_cell: Cell) -> Cell:", "def get_adjcells(self,cell):\n adj_cells = []\n cells_xy = []\n if cell.x > 0:\n adj_cells.append(self.cell_array.item((cell.x-1,cell.y)))\n if cell.x < self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x+1,cell.y)))\n if cell.y > 0:\n adj_cells.append(self.cell_array.item((cell.x,cell.y-1)))\n if cell.y < self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x,cell.y+1)))\n return adj_cells", "def fifteen():\r\n\r\n currentcell = 1.0\r\n cellpaths = 2.0\r\n \r\n while currentcell < 20.0:\r\n currentcell += 1.0\r\n cellpaths = cellpaths * (4.0 - 2.0/currentcell)\r\n \r\n return cellpaths", "def connect_cells(dfte,vari):\n # Create the variabel cell for mother, grand mother and grand grand mother\n if 'g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'parent_cell') #Create genealogy\n if 'g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_parent_cell')\n if 'g_g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_g_parent_cell')\n #give unique index to all cells\n dfte['uid'] = dfte['cell']+dfte['time_sec'].apply(lambda x: str(x))\n vac=[];sc=[];uid = []\n # Create a vecotor for the variable of interest of cell,mother,grand mother and grand grand mother and an unique identifier of it\n for c,idx in enumerate(dfte['cell'].unique()):\n dau = dfte.loc[dfte['cell']==idx]\n pc = dau['parent_cell'].iloc[0]\n mum = dfte.loc[dfte['cell']==pc]\n gpc = dau['g_parent_cell'].iloc[0]\n gmum = dfte.loc[dfte['cell']==gpc]\n ggpc = dau['g_g_parent_cell'].iloc[0]\n ggmum = dfte.loc[dfte['cell']==ggpc]\n gggpc = dau['g_g_g_parent_cell'].iloc[0]\n gggmum = dfte.loc[dfte['cell']==gggpc]\n fte = lambda x: x[['{}'.format(vari),'uid']].values\n tmp = np.vstack([fte(gggmum),fte(ggmum),fte(gmum),fte(mum),fte(dau)])\n vac.append(tmp[:,0])\n uid.append(tmp[:,1])\n sc.append(['super_cell_{}'.format(c)]*len(tmp))\n return 
pd.DataFrame({'super_cell':np.hstack(sc),'uid':np.hstack(uid),'{}'.format(vari):np.hstack(vac)})", "def _compute_hydrogen_bonds(self, entity):\n\n for (aa1, aa2) in combinations(entity, 2):\n\n # do not consider this pair if the number of atoms of the\n # residues is not sufficient\n if not (validate(aa1) and validate(aa2)):\n continue\n\n # stores both potentials between aa1 and aa2\n potentials = []\n\n segid1 = get_pos(aa1)\n segid2 = get_pos(aa2)\n\n # distance\n dist = np.abs(segid1 - segid2)\n\n # take care of the minimal sequence distance criterion\n # between aa1 and aa2\n if dist < self.min_seq_distance:\n continue\n\n # extract atoms from both amino acids\n atoms = [aa1.get_unpacked_list(),\n aa2.get_unpacked_list()]\n\n for i in range(0, len(atoms)):\n c_carboxyl = np.array(atoms[i][2].get_coord())\n o_carboxyl = np.array(atoms[i][3].get_coord())\n\n nitrogen = np.array(atoms[1-i][0].get_coord())\n hydrogen = None\n for atom in atoms[1-i]:\n if atom.get_name().strip() == 'H':\n hydrogen = np.array(atom.get_coord())\n\n if hydrogen is None:\n potentials.append(0)\n continue\n\n # compute relevant distances\n r_ON = np.linalg.norm(o_carboxyl - nitrogen)\n r_CH = np.linalg.norm(c_carboxyl - hydrogen)\n r_OH = np.linalg.norm(o_carboxyl - hydrogen)\n r_CN = np.linalg.norm(c_carboxyl - nitrogen)\n\n # compute potential\n pot = potential(r_ON, r_CH, r_OH, r_CN)\n\n potentials.append(pot if pot < co.HBOND_THRESHOLD else 0)\n\n # we return this as an result if at least one potential\n # is below the threshold , so they are not both 0\n if sum(potentials) != 0:\n yield (aa1, aa2, potentials[0], potentials[1])", "def test_get_leaf_cells(mock_amg):\n\n # for no splitting, this should be the same as the cell list\n assert mock_amg.get_all_leaf_cells() == mock_amg.cells\n\n # split the middle cell\n mock_amg.cells[4].split()\n\n # expected output\n exp_list = []\n exp_list.extend(mock_amg.cells[0:4])\n exp_list.extend(mock_amg.cells[-4:])\n exp_list.extend(mock_amg.cells[5:-4])\n\n assert exp_list == mock_amg.get_all_leaf_cells()", "def get_pegs(self):\n empty_pegs = []\n red_pegs = []\n black_pegs = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n if current_cell.get_cell_state() == 0:\n empty_pegs.append(current_cell)\n elif current_cell.get_cell_state() == 1:\n red_pegs.append(current_cell)\n elif current_cell.get_cell_state() == 2:\n black_pegs.append(current_cell)\n return empty_pegs, red_pegs, black_pegs", "def BoosterFlux(E,mbparam):\n flux_data = np.array(LoadBoosterFlux(mbparam))\n E_lo = flux_data[:,0]*mbparam.GeV\n E_hi = flux_data[:,1]*mbparam.GeV\n \n nu_mu = flux_data[:,2]/(50.0*mbparam.MeV) # conv. scale to eV\n nu_mub = flux_data[:,3]/(50.0*mbparam.MeV) # conv. scale to eV\n \n nu_e = flux_data[:,4]/(50.0*mbparam.MeV) # conv. scale to eV\n nu_eb = flux_data[:,5]/(50.0*mbparam.MeV) # conv. 
scale to eV\n \n for i,EE in enumerate(E_lo):\n if E >= E_lo[i] and E < E_hi[i]:\n return [nu_e[i],nu_eb[i],nu_mu[i],nu_mub[i]]\n else :\n pass\n \n return [0.0,0.0,0.0,0.0]", "def test_feeding_carnivores_in_a_cell():\n animals.Carnivores.set_parameters({\"DeltaPhiMax\": 0.1})\n jungle_cell = topo.Jungle()\n [jungle_cell.add_animal(animals.Herbivores()) for _ in range(10)]\n [jungle_cell.add_animal(animals.Carnivores(weight=80)) for _ in range(10)]\n assert jungle_cell.biomass_carnivores() == 800\n pre_feeding_herbi_biomass = jungle_cell.biomass_herbivores()\n jungle_cell.feed_carnivores_in_cell()\n assert pre_feeding_herbi_biomass > jungle_cell.biomass_herbivores()", "def _get_supercells(self, struct1, struct2, fu, s1_supercell):\n def av_lat(l1, l2):\n params = (np.array(l1.lengths_and_angles) +\n np.array(l2.lengths_and_angles)) / 2\n return Lattice.from_lengths_and_angles(*params)\n\n def sc_generator(s1, s2):\n s2_fc = np.array(s2.frac_coords)\n if fu == 1:\n cc = np.array(s1.cart_coords)\n for l, sc_m in self._get_lattices(s2.lattice, s1, fu):\n fc = l.get_fractional_coords(cc)\n fc -= np.floor(fc)\n yield fc, s2_fc, av_lat(l, s2.lattice), sc_m\n else:\n fc_init = np.array(s1.frac_coords)\n for l, sc_m in self._get_lattices(s2.lattice, s1, fu):\n fc = np.dot(fc_init, np.linalg.inv(sc_m))\n lp = lattice_points_in_supercell(sc_m)\n fc = (fc[:, None, :] + lp[None, :, :]).reshape((-1, 3))\n fc -= np.floor(fc)\n yield fc, s2_fc, av_lat(l, s2.lattice), sc_m\n if s1_supercell:\n for x in sc_generator(struct1, struct2):\n yield x\n else:\n for x in sc_generator(struct2, struct1):\n # reorder generator output so s1 is still first\n yield x[1], x[0], x[2], x[3]", "def test_feeding_herbivores_in_a_cell():\n jungle_cell = topo.Jungle()\n animals.Herbivores.parameters[\"sigma_birth\"] = 1.5\n [jungle_cell.add_animal(animals.Herbivores()) for _ in range(81)]\n herbivore_fitness_sort = sorted(jungle_cell.herbivore_list,\n key=lambda herbi: herbi.fitness)\n assert herbivore_fitness_sort[0].fitness < \\\n herbivore_fitness_sort[30].fitness\n least_fittest_herb = herbivore_fitness_sort[0]\n second_least_fittest_herb = herbivore_fitness_sort[1]\n least_fittest_weight = least_fittest_herb.weight\n second_least_fittest_weight = second_least_fittest_herb.weight\n jungle_cell.feed_herbivores_in_cell()\n assert least_fittest_weight == least_fittest_herb.weight\n assert second_least_fittest_weight != second_least_fittest_herb.weight\n # Test that the available fodder now are = 0, and that the increase_fodder\n # works, so the food level again = f_max\n assert jungle_cell.fodder == 0\n jungle_cell.increase_fodder()\n assert jungle_cell.fodder == 800", "def get_cell(twofold, verbose=False):\n\n\n def check90(angle):\n return abs(angle - (np.pi / 2)) < twofold['toll']\n\n def eqv(a, b, tola=0.2):\n return abs(a - b) < tola\n\n def colinear(v1, v2):\n return all(np.cross(v1, v2) == 0)\n\n\n tr = np.identity(3, dtype=int)\n d_base = twofold['dbase']\n ntwo = len(twofold['uvw'])\n\n if ntwo == 9: # Case (9) !Cubic n-2foldaxes=9\n mv = mt.mod(twofold['dv'])\n for i, j, k in combinations(range(ntwo), 3):\n vi = twofold['dv'][i]\n vj = twofold['dv'][j]\n vk = twofold['dv'][k]\n aij = mt.angle_between_vectors(vi, vj)\n aik = mt.angle_between_vectors(vi, vk)\n ajk = mt.angle_between_vectors(vj, vk)\n if check90(aij) and check90(aik) and check90(ajk):\n eij = eqv(mv[i], mv[j])\n eik = eqv(mv[i], mv[k])\n ejk = eqv(mv[j], mv[k])\n if eij and eik and ejk:\n v1 = vi\n v2 = vj\n v3 = vk\n tr = np.array([twofold['uvw'][i] 
for i in [i, j, k]])\n break\n\n # check rightness\n namina = nl.det(tr)\n if namina < 0:\n tr[3] *= -1\n v3 *= -1\n namina *= -1\n\n if namina == 0:\n print(\"Pseudo-cubic but tolerance too small ... \")\n ok = False\n return\n if namina == 1:\n print(\"Cubic, Primitive cell\")\n if namina == 2:\n if not(mt.coprime(np.dot([0, 1, 1], tr))):\n print(\"Cubic, A-centred cell\")\n elif not(mt.coprime(np.dot([1, 1, 1], tr))):\n print(\"Cubic, I-centred cell\")\n elif not(mt.coprime(np.dot([1, 1, 0], tr))):\n print(\"Cubic, C-centred cell\")\n elif not(mt.coprime(np.dot([1, 0, 1], tr))):\n print(\"Cubic, B-centred cell\")\n if namina >= 3:\n print(\"Cubic, F-centred cell\")\n\n if ntwo == 7: # Case (7) !Hexagonal n-2foldaxes=7\n hexap = False\n hexac = False\n\n mv = mt.mod(twofold['dv'])\n order = np.argsort(mv)\n twofold['sigma'] = twofold['sigma'][order]\n twofold['uvw'] = twofold['uvw'][order]\n twofold['hkl'] = twofold['hkl'][order]\n twofold['dv'] = twofold['dv'][order]\n twofold['rv'] = twofold['rv'][order]\n mv = mt.mod(twofold['dv'])\n\n # Search tha a-b plane\n for i, j in combinations(range(ntwo), 2):\n vi = twofold['dv'][i]\n vj = twofold['dv'][j]\n aij = mt.angle_between_vectors(vi, vj)\n if abs(aij - (2 * np.pi / 3.0)) < twofold['toll']:\n if (abs(mv[i] - mv[j]) < tola):\n v1 = vi\n v2 = vj\n v1_i, v2_i = i, j\n hexap = True\n break\n\n\n # then ! Search the c-axis, it should be also\n # a two-fold axis! because Op(6).Op(6).Op(6)=Op(2)\n if hexap:\n for i in range(ntwo):\n vi = twofold['dv'][i]\n aij = mt.angle_between_vectors(v1, vi)\n aik = mt.angle_between_vectors(v2, vi)\n if check90(aij) and check90(aik):\n v3 = vi\n v3_i = i\n hexac = True\n break\n else:\n ok = False\n if verbose:\n print('no axes 90 to hexagonal')\n if hexac:\n tr = np.array([twofold['uvw'][v1_i],\n twofold['uvw'][v2_i],\n twofold['uvw'][v3_i]])\n\n namina = int(nl.det(tr))\n if (namina < 0):\n tr[2] *= -1\n v3 *= -1\n namina *= -1\n if namina == 1:\n print(\"Hexagonal, Primitive cell\")\n elif namina > 1:\n print(\"Hexagonal, centred cell? 
possible mistake\")\n else:\n print('azzo', namina)\n else:\n ok = False\n print(\"The c-axis of a hexagonal cell was not found!\")\n return\n\n if ntwo == 5: # Case (5) !Tetragonal n-2foldaxes=5\n ab = []\n inp = np.zeros(5)\n mv = mt.mod(twofold['dv']) # modulus twofold vector\n\n for i, j in combinations(range(ntwo), 2):\n vi = twofold['dv'][i]\n vj = twofold['dv'][j]\n m = mt.angle_between_vectors(vi, vj)\n c45 = abs(m - (np.pi * 0.25)) < twofold['toll']\n c135 = abs(m - (np.pi * 0.75)) < twofold['toll']\n if c45 or c135:\n inp[i] = 1\n inp[j] = 1\n ab.append([i, j][np.argmin(mv[[i, j]])])\n ab = list(set(ab))\n if len(ab) < 2:\n ok = False\n if verbose:\n print(\"Basis vectors a-b not found!\")\n return\n # !Determination of the c-axis\n # (that making 90 degree with all the others)\n naminc = np.argmin(inp)\n # !The two axes forming a,b are those of indices ab(1) and ab(2)\n namina = ab[0]\n naminb = ab[1]\n\n tr[0] = twofold['uvw'][namina]\n tr[1] = twofold['uvw'][naminb]\n tr[2] = twofold['uvw'][naminc]\n v1 = twofold['dv'][namina]\n v2 = twofold['dv'][naminb]\n v3 = twofold['dv'][naminc]\n\n namina = nl.det(tr)\n if (namina < 0):\n tr[2] = -tr[2]\n v3 = -v3\n namina = -namina\n\n if namina == 1:\n print(\"Tetragonal, Primitive cell\")\n elif namina == 2:\n print(\"Tetragonal, I-centred cell\")\n else:\n print(\"Error in tetragonal cell\")\n ok = False\n\n if ntwo == 3: # Case (3) !Orthorhombic/Trigonal n-2foldaxes=3\n u = mt.mod(twofold['dv'])\n a_i, b_i, c_i = np.argsort(u)\n tr[0, :] = twofold['uvw'][a_i]\n tr[1, :] = twofold['uvw'][b_i]\n tr[2, :] = twofold['uvw'][c_i]\n\n v1 = twofold['dv'][a_i]\n v2 = twofold['dv'][b_i]\n v3 = twofold['dv'][c_i]\n\n ang = np.array([mt.angle_between_vectors(v3, v2),\n mt.angle_between_vectors(v1, v3),\n mt.angle_between_vectors(v1, v2)])\n\n # Check the system by verifying that the two-fold axes\n # form 90 (orthorhombic)\n if all([check90(i) for i in ang]): # if !orthorhombic\n namina = nl.det(tr)\n if namina < 0:\n tr[2, :] *= -1\n v3 *= -1\n namina *= -1\n\n if namina == 1:\n print(\"Orthorhombic, Primitive cell\")\n elif namina == 2:\n vecs = [[0, 1, 1], [1, 1, 1], [1, 1, 0], [1, 0, 1]]\n for rw_i, rw in enumerate(vecs):\n if not(mt.coprime(np.dot(rw, tr))):\n orthoType = rw_i\n break\n message = [\"Orthorhombic, A-centred cell\",\n \"Orthorhombic, I-centred cell\",\n \"Orthorhombic, C-centred cell\",\n \"Orthorhombic, B-centred cell\"]\n print(message[orthoType])\n if namina >= 3:\n print(\"Orthorhombic, F-centred cell\")\n\n else: # !Rhombohedral/Trigonal\n # In the Trigonal system the two-fold axes are in the plane\n # the three-fold axis, and valid a,b, vectors can be chosen\n # among any two two-fold axes forming an angle of 120 degrees\n # verify that 1 and 2 form 120 \n if any(abs(ang - (2 * np.pi / 3)) < twofold['toll']): # search 120\n res = np.argmin(abs(ang - (2 * np.pi / 3)))\n if res == 0:\n v1, v2, v3 = v2, v3, v1\n tr = tr[[1, 2, 0]]\n elif res == 1:\n v1, v2, v3 = v1, v3, v2\n tr = tr[[0, 2, 1]]\n\n elif any(abs(ang - (np.pi / 3)) < twofold['toll']): # search 60\n res = np.argmin(abs(ang - (np.pi / 3)))\n if res == 0:\n v1, v2, v3 = v2, -v3, v1\n tr = tr[[1, 2, 0]]\n elif res == 1:\n v1, v2, v3 = v1, -v3, v2\n tr = tr[[0, 2, 1]]\n else:\n v2 = -v2\n tr[1] = -tr[1]\n else:\n if verbose:\n print(\"Trigonal/Rhombohedral test failed! 
\\\n \\nSupply only one two-fold axis\")\n ok = False\n return\n\n ok = False\n for uvw in nestedLoop(range(-3, 4), range(-3, 4), range(0, 4)):\n if not(mt.coprime(uvw)):\n continue\n vec = np.dot(d_base, uvw)\n ang1 = mt.angle_between_vectors(vec, v1)\n ang2 = mt.angle_between_vectors(vec, v2)\n if check90(ang1) and check90(ang2):\n tr[2, :] = uvw\n ok = True\n break\n if ok:\n namina = np.round(nl.det(tr), 4)\n if namina < 0:\n tr[2, :] *= -1\n namina *= -1\n v3 = np.dot(d_base, tr[2])\n if namina == 1:\n print(\"Rhombohedral Primitive hexagonal cell\")\n\n elif namina == 3:\n rw = np.dot([2, 1, 1], tr)\n if not(mt.coprime(rw)):\n print(\"Rhombohedral, obverse setting cell\")\n else:\n print(\"Rhombohedral, reverse setting cell\")\n else:\n print(\"Trigonal/Rhombohedral test failed!\")\n print(\" Supply only one two-fold axis\")\n\n if ntwo == 1: # Case (3) !Monoclinic n-2foldaxes=1\n v2 = twofold['dv'][0]\n tr[1] = twofold['uvw'][0]\n row = []\n for rw in nestedLoop(range(-3, 4), range(-3, 4), range(0, 4)):\n if all(np.array(rw) == 0):\n continue\n if not(mt.coprime(rw)):\n continue\n vec = np.dot(d_base, rw)\n if check90(mt.angle_between_vectors(v2, vec)):\n if any([colinear(rw, i) for i in row]):\n continue\n else:\n row.append(np.array(rw))\n\n\n row_mod = np.array([mt.mod(np.dot(d_base, i)) for i in row])\n rms = np.argsort(row_mod)\n tr[0] = row[rms[0]]\n v1 = np.dot(d_base, tr[0, :])\n tr[2] = row[rms[1]]\n v3 = np.dot(d_base, tr[2, :])\n\n # Test rightness\n if nl.det(tr) < 0:\n tr[1, :] = -tr[1, :]\n v2 = -v2\n\n # Test if beta is lower than 90 in such a case invert c and b\n if mt.angle_between_vectors(v1, v3) < (np.pi / 2): # !angle beta < 90\n tr[0, :] = -tr[0, :]\n v1 = -v1\n tr[2, :] = -tr[2, :]\n v2 = -v2\n\n namina = nl.det(tr)\n if namina == 1:\n print(\"Monoclinic, Primitive cell\")\n if namina == 2:\n if not(mt.coprime(np.dot([1, 1, 0], tr))):\n print(\"Monoclinic, C-centred cell\")\n if not(mt.coprime(np.dot([0, 1, 1], tr))):\n print(\"Monoclinic, A-centred cell\")\n\n if ntwo not in [1, 3, 5, 7, 9]:\n print(\"Wrong number of two-fold axes! 
\", ntwo)\n ok = False\n return\n # Calculation of the new cell\n alpha = np.degrees(mt.angle_between_vectors(v2, v3))\n beta = np.degrees(mt.angle_between_vectors(v1, v3))\n gamma = np.degrees(mt.angle_between_vectors(v1, v2))\n a = mt.mod(v1)\n b = mt.mod(v2)\n c = mt.mod(v3)\n print(f'a={round(a,2)}, b={round(b,2)}, c={round(c,2)},',\n f'alpha={round(alpha,2)}, beta={round(beta,2)},',\n f'gamma={round(gamma,2)}')\n ok = True\n return [a, b, c, alpha, beta, gamma], tr", "def test_split_cell_east_sets_neighbours(mock_amg):\n\n mock_amg.cells[4].split() # middle cell\n mock_amg.cells[5].split() # east cell\n\n east = mock_amg.cells[5]\n west = mock_amg.cells[4]\n\n assert west.children['tr'].east == east.children['tl']\n assert west.children['br'].east == east.children['bl']\n assert east.children['tl'].west == west.children['tr']\n assert east.children['bl'].west == west.children['br']", "def split_bottleExpansion((nuW,nuEF,nuEB,TE), (n1,n2), pts): \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n \n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs", "def doCalculation(self, E1, E2, muL, muR, T, pot, C, TCalc, Density, E0, L):\n NEcut = len(E1) #we determine the number of single-particle states that we use\n VG=np.diag(pot)\n E= int(0.5*np.size(VG))\n V = VG[0:E] #since the potential of both barriers is symmetric and we only tunnel through one barrier. Therefore we only use one half of the potential.\n dx= L/(np.size(pot))\n\n #Following prints are for debugging purposes:\n #print(\"---------------------------------------------------------------------\")\n #print(\"---------------------------------------------------------------------\")\n #print(\"Hier beginnt die Ausgabe von Rates:\")\n #print(\"---------------------------------------------------------------------\")\n #print(\"V:\", V)\n #print(\"E1:\", E1)\n #print(\"E2:\", E2)\n #print(\"C:\", C)\n\n kB=0.08629 #Boltzmann constant in meV/K\n \n \n def fermi(E,mu,T):\n \"\"\"This fermi-function tells us with which likelyhood a state with an E is occupied on the lead.\n E(float): energy difference between the initial and the final state that the tunneling electron has to carry.\n mu(float): chemical potential of either drain(muR) or source(muL).\n T(float): temperature.\n \"\"\"\n if (E-mu)/T > 600:\n f=0\n\t\t\t\t\n else:\n f=1/(math.exp((E-mu)/(kB*T) )+1)\n return(f)\n \n\n\t#This function is called by the Gamma_ij-equations and includes the transmission-coefficient for each tunnelling-event\n #and the density of state function of the source and drain. 
\n def Gamma(Ea,Eb,V):\n \"\"\":math:`\\\\Gamma` includes the transmission coefficient and DOS: :math:`\\Gamma = | t |^2 * DOS`\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n V(np.array): barrier potential\n \"\"\"\n #print(Ea)\n #print(V)\n return (np.absolute(TCalc.calculate_transmission(Ea,V,dx))**2*Density.calculate_DensityofStates(np.absolute(Ea-Eb)))\n \n #These next four functions are used to calculate the transition rates.Each function for a different kind of transition:\n #We distinguish between transitions, in which the number of electrons on the dot changes from one to two(Gamma_12) and reverse(Gamma_21).\n #And between transitions in which the number of electrons on the dot change from zero to one(Gamma_01) and reverse(Gamma_10).\n\n def Gamma_12(Ea,Eb,mu,T):\n \"\"\"Calculates the rate of a transition from a one body state to a two body state.\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n summe=0\n j=0\n Cb=C[np.where(E2==Eb)[0][0]]\n while j< NEcut:\n summe=Cb[np.where(E1==Ea)[0][0]][j]+summe\n j=j+1\n return(Gamma(Ea,Eb,V)*(np.absolute(summe))**2*fermi((Eb-Ea),mu,T))\n\n\n def Gamma_01(Eb,mu,T):\n \"\"\"Calculates the transition rate from the vacuum state to a one-body state.\n\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n return(Gamma(E0,Eb,V)*fermi((Eb-E0),mu,T))\n\n def Gamma_21(Ea,Eb,mu,T):\n \"\"\"Calculates the rate of a transition from a two body state to a one body state.\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n summe=0\n nu=0\n Ca=C[np.where(E2==Ea)[0][0]]\n while nu < NEcut:\n summe=summe+Ca[np.where(E1==Eb)[0][0]][nu]\n nu=nu+1\n return(Gamma(Ea,Eb,V)*(np.absolute(summe))**2*(1-fermi((Ea-Eb),mu,T)))\n\n def Gamma_10(Ea,mu,T):\n \"\"\"Calculates the rate of a transition from a one body state to the vacuum state.\n\n Ea(float): energy of initial state \n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n return(Gamma(Ea,E0,V)*(1-fermi((Ea-E0),mu,T)))\n\n #creating the output matrices that later contain all the transition rates through either\n #the left or the right barrier\n Gamma_R=np.zeros((1+np.size(E1)+np.size(E2),1+np.size(E1)+np.size(E2)))\n Gamma_L=np.zeros((1+np.size(E1)+np.size(E2),1+np.size(E1)+np.size(E2)))\n\n #using a loop to fill the output matrices with transition rates.\n i_=0\n for i in E1:\n j_=0\n for j in E2:\n Gamma_L[i_+1][j_+1+np.size(E1)]=Gamma_12(i,j,muL,T)\n Gamma_L[j_+1+np.size(E1)][i_+1]=Gamma_21(j,i,muL,T)\n Gamma_R[i_+1][j_+1+np.size(E1)]=Gamma_12(i,j,muR,T)\n Gamma_R[j_+1+np.size(E1)][i_+1]=Gamma_21(j,i,muR,T)\n j_=j_+1\n Gamma_L[0][i_+1]=Gamma_10(i,muL,T)\n Gamma_R[0][i_+1]=Gamma_10(i,muR,T)\n Gamma_L[i_+1][0]=Gamma_01(i,muL,T)\n Gamma_R[i_+1][0]=Gamma_01(i,muR,T)\n i_=1+i_\n\n #print(\"Gamma_L und Gamma_R:\")\n #print(Gamma_L,Gamma_R)\n #print(\"-----------------------------------------------------------------------\")\n #print(\"---------------------------------------------------------------------\")\n return(Gamma_L,Gamma_R)", "def test_two_cell_repel():\n space = c6.Space()\n c6.Cell(space, [0, 0], 1)\n c6.Cell(space, [0, 1.9], 1)\n for i in range(2):\n space.step()", "def build_bonds(self):\n 
shape_prime = np.array([self.shape[0]-1,self.shape[1]-1,self.shape[2]-1])\n zeros = np.array([0,0,0])\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for b,bond in enumerate(self.cell.bonds):\n newbond = copy.deepcopy(bond)\n newbond.cell1 += [i,j,k]\n newbond.cell2 += [i,j,k]\n #ToDo make a function to shorten those lines\n if np.prod(newbond.cell1 <= shape_prime) and np.prod(newbond.cell2<=shape_prime) and np.prod(zeros <=newbond.cell1) and np.prod(zeros <= newbond.cell2):\n newbond.coordinate1 = self.sites[newbond.cell1[0],newbond.cell1[1],newbond.cell1[2],newbond.site1].coordinate\n newbond.coordinate2 = self.sites[newbond.cell2[0],newbond.cell2[1],newbond.cell2[2],newbond.site2].coordinate\n self.bonds.append(newbond)", "def process_cell_cell_collision(i, membrane, vert_normals, cell, next_cell, cells, membrane_bounds, membrane_polys, next_momentum, sim_speed):\r\n for j, other in enumerate(cells):\r\n if i == j:\r\n continue\r\n\r\n b1 = membrane_bounds[i]\r\n b2 = membrane_bounds[j]\r\n # AABB test\r\n if b1[2] < b2[0] or b2[2] < b1[0] or b1[3] < b2[1] or b2[3] < b1[1]:\r\n intersection = None\r\n else:\r\n # Pressure from overlapping cells\r\n intersects = matplotlib.path.Path(\r\n other['membrane']).contains_points(membrane)\r\n next_momentum -= vert_normals[i] * 0.5 * np.expand_dims(np.minimum(\r\n (0.0 + 0.1) * intersects, 0.2 * next_cell['volume']), -1) * sim_speed\r\n\r\n # if b1[2] < b2[0] or b2[2] < b1[0] or b1[3] < b2[1] or b2[3] < b1[1]:\r\n # intersection = None\r\n # else:\r\n # intersection = membrane_polys[i].intersection(\r\n # membrane_polys[j])\r\n\r\n # # Pressure from overlapping cells\r\n # if intersection is not None and not intersection.is_empty:\r\n # intersects = matplotlib.path.Path(\r\n # other['membrane']).contains_points(membrane)\r\n # next_momentum -= vert_normals[i] * 0.5 * np.expand_dims(np.minimum(\r\n # (intersection.area + 0.1) * intersects, 0.2 * next_cell['volume']), -1) * sim_speed\r", "def update_cell_membranes(cells):\r\n membrane_polys = [Polygon(cell['membrane']).buffer(0) for cell in cells]\r\n membrane_bounds = [p.bounds for p in membrane_polys]\r\n\r\n # Get normal vectors for membrane vertices\r\n vert_normals = [geometry.get_vert_normals(\r\n geometry.get_edge_normals(cell['membrane'])) for cell in cells]\r\n\r\n all_membranes = np.concatenate([cell['membrane'] for cell in cells], axis=0)\r\n # [(cell idx, vert idx), ...]\r\n all_membrane_map = np.concatenate([\r\n np.stack([\r\n np.repeat([i], cell['membrane'].shape[0]),\r\n np.arange(cell['membrane'].shape[0],)\r\n ], axis=1)\r\n for i, cell in enumerate(cells)\r\n ], axis=0).astype(np.int32)\r\n\r\n # Find inter-cell membrane vertices that are close enough for adhesion/diffusion\r\n nearby_membranes, nearby_membrane_map = find_nearby_membranes(\r\n all_membranes, all_membrane_map, vert_normals)\r\n\r\n # Change membrane rest length according with the cell volume\r\n membrane_rdists = []\r\n for i, cell in enumerate(cells):\r\n # Get all the pairwise distances between membrane vertices\r\n membrane_dists = scipy.spatial.distance.squareform(\r\n scipy.spatial.distance.pdist(cell['membrane']))\r\n membrane_rdists_i = 1.0 / (membrane_dists + 1e-6)\r\n membrane_rdists_i[np.where(membrane_dists == 0)] = 0\r\n membrane_rdists.append(membrane_rdists_i)\r\n\r\n return membrane_bounds, membrane_polys, vert_normals, \\\r\n all_membranes, all_membrane_map, \\\r\n nearby_membranes, nearby_membrane_map, \\\r\n membrane_rdists", "def 
gen_CASTEP_supercell(CASTEP_cell,na,nb,nc):\n nruter=dict()\n nruter[\"na\"]=na\n nruter[\"nb\"]=nb\n nruter[\"nc\"]=nc\n nruter[\"lattvec\"]=np.array(CASTEP_cell[\"lattvec\"])\n nruter[\"lattvec\"][:,0]*=na\n nruter[\"lattvec\"][:,1]*=nb\n nruter[\"lattvec\"][:,2]*=nc\n nruter[\"elements\"]=copy.copy(CASTEP_cell[\"elements\"])\n nruter[\"numbers\"]=na*nb*nc*CASTEP_cell[\"numbers\"]\n nruter[\"positions\"]=np.empty((3,CASTEP_cell[\"positions\"].shape[1]*na*nb*nc))\n pos=0\n for pos,(k,j,i,iat) in enumerate(itertools.product(xrange(nc),\n xrange(nb),\n xrange(na),\n xrange(\n CASTEP_cell[\"positions\"].shape[1]))):\n nruter[\"positions\"][:,pos]=(CASTEP_cell[\"positions\"][:,iat]+[i,j,k])/[\n na,nb,nc]\n nruter[\"types\"]=[]\n for i in xrange(na*nb*nc):\n nruter[\"types\"].extend(CASTEP_cell[\"types\"])\n # print \"supercell\", nruter\n return nruter", "def bottlegrow_split_bottleExpansion((nu,T,nuW,nuEF,nuEB,TE), (n1,n2), pts): \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n # bottlegrowth in ancient population\n nu_func = lambda t: numpy.exp(numpy.log(nu) * t/T)\n\n phi = Integration.one_pop(phi, xx, T, nu_func)\n\n\n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs\n\n\n\n\n\n def bottlegrow_split_bottleExpansion_mig((nu,T,nuW,nuEF,nuEB,TE,m12,m21), (n1,n2), pts):\n \"\"\"\n Model with bottlegrowth, split, followed by second bottleneck and exp recovery in Eastern pop\n \n nu, or ancestral population size defaults to 1.\n \n nu= Ratio of contemporary to ancient population size\n T = Time in the past at which growth began\n nuW: The size of the western population after split\n nuEF: The final size for the eastern population\n nuEB: The size of the eastern population after the bottleneck\n TE: The time of the eastern-western split\n m12: Migration from pop 2 to pop 1 (2*Na*m12)\n m21: Migration from pop 1 to pop 2\n\n n1,n2: Size of fs to generate.\n pts: Number of points to use in grid for evaluation.\n \"\"\" \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n # bottlegrowth in ancient population\n nu_func = lambda t: numpy.exp(numpy.log(nu) * t/T)\n\n phi = Integration.one_pop(phi, xx, T, nu_func)\n\n\n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, 
nu1=nuW_func, nu2=nuE_func,m12=m12, m21=m21)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs", "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def next_t(cell_list, current_burning, b_grid, current_fuel, f_grid, h_grid, \n i_threshold, w_direction, burnt_cells):\n for cell in cell_list: \n \n # for a cell that's not yet burning\n if b_grid[cell[0]][cell[1]] is False:\n burn = check_ignition(current_burning, current_fuel, h_grid, \n i_threshold, w_direction, cell[0], cell[1])\n if burn:\n burnt_cells.append(cell)\n b_grid[cell[0]][cell[1]] = True\n \n # for a cell that's already burning\n else: \n if f_grid[cell[0]][cell[1]] > 1:\n f_grid[cell[0]][cell[1]] -= 1\n else:\n f_grid[cell[0]][cell[1]] -= 1\n b_grid[cell[0]][cell[1]] = False", "def test_split_adds_known_neighbours(mock_amg):\n\n mock_amg.cells[4].split()\n # bl\n assert mock_amg.cells[-4].north is mock_amg.cells[-2]\n assert mock_amg.cells[-4].east is mock_amg.cells[-3]\n\n # br\n assert mock_amg.cells[-3].north is mock_amg.cells[-1]\n assert mock_amg.cells[-3].west is mock_amg.cells[-4]\n\n # tl\n assert mock_amg.cells[-2].south is mock_amg.cells[-4]\n assert mock_amg.cells[-2].east is mock_amg.cells[-1]\n\n # tr\n assert mock_amg.cells[-1].south is mock_amg.cells[-3]\n assert mock_amg.cells[-1].west is mock_amg.cells[-2]", "def bombs(self) -> List[Point]:\n\t\treturn self._bombs", "def bosonic_cells(self):\n cells = self.cells()\n fermionic_cells = self.fermionic_cells()\n coords = [x for x in cells if x not in fermionic_cells]\n return coords" ]
[ "0.7026222", "0.5630059", "0.5613996", "0.55163336", "0.54273593", "0.5390995", "0.53396046", "0.53241175", "0.5278331", "0.5259016", "0.5221316", "0.52150244", "0.52112615", "0.52056575", "0.51980996", "0.5194948", "0.51921505", "0.5188548", "0.5138519", "0.5136642", "0.5085086", "0.5083834", "0.50675434", "0.5060581", "0.50605386", "0.50569123", "0.50086504", "0.500254", "0.5000854", "0.49908102" ]
0.643389
1
Returns an array of Ag present in the system at every timestep using the initial dose and assuming exponential decay with the decay constant supplied. Maximum initial dose is 1 so that all values in this array range between 0 and 1. Values below 0.01 are set to 0. Several vaccination timepoints are handled by adding their effects and applying a ceiling afterwards.
def Ag_density(): # initialise no infection default for the number of infections required agcurves = [np.zeros(cf.endtime + 1) for inf in cf.tinf] # for every infection, calculate its individual effect per timepoint for i in range(len(cf.tinf)): pag = cf.dose[i] # peak tai = 0 # tnow after infection while pag > 0.01: pag = cf.dose[i] * math.exp(-float(tai) / cf.tdecay) agcurves[i][cf.tinf[i] + tai] = pag tai += 1 if cf.tinf[i] + tai >= cf.endtime: break # sum up all effects agcurve_uncapped = np.sum(agcurves, axis=0) # set all values above 100% to 100% agcurve = [np.min([val, 1]) for val in agcurve_uncapped] return agcurve
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n value_estimates = np.append(value_estimates, value_next)\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage", "def main():\n prior = np.asarray([2.0, 4e-6, 1e-4])\n priorCov = np.diag(np.asarray([5.0, 1e-2, 1.0]))\n independentVariable = np.arange(0.0, 5.0e-5, 1e-7)\n \n truth = np.asarray([2.5, 1.2e-5, 4e-4])\n observation = (truth[0] * np.exp(-independentVariable/(truth[1])) + \n truth[2] + np.random.normal(0.0, 0.1, independentVariable.shape))\n observationError = (0.1 * np.sqrt(truth[0] * \n np.exp(-independentVariable/(truth[1]))) + math.sqrt(truth[2]))\n \n model = decay(prior=prior, priorCov=priorCov, \n otherModelParam=None, \n parameterNames=[\"a\", \"tau\", \"offset\"], \n verbose=2,\n observation=observation, \n observationError=observationError,\n independentVariable=independentVariable)\n \n oe = OE(model=model, maxiter=8)\n \n oe()\n \n# print(oe)\n# print(model)\n# \n# model.plot()\n \n return oe, model", "def compute_gae(rewards, values, gamma, tau):\n tensor_type = type(values)\n deltas = tensor_type(values.size(0), 1)\n advantages = tensor_type(values.size(0), 1)\n prev_value = 0\n prev_advantage = 0\n for i in reversed(range(rewards.size(0))):\n deltas[i] = rewards[i] + gamma * prev_value - values[i]\n advantages[i] = deltas[i] + gamma * tau * prev_advantage\n prev_value = values[i, 0]\n prev_advantage = advantages[i, 0]\n\n return advantages", "def test_call_function_ExponentialDecay():\n a = 0.4 # Decay constant\n u0 = 3.2 # Function value u(t) for some known time t\n der_u = -1.28 # Analytic value for the derivative of u at the known time t\n eps = 10**(-7)# Since we are dealing with floating point numbers,\n # we need a limit when checking that a difference is zero.\n decay_model = ExponentialDecay(a)\n assert(abs(decay_model(0, u0)-der_u) < eps)", "def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n value_estimates = np.asarray(value_estimates.tolist() + [value_next])\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage", "def gae(done, rewards, values, n_envs, steps_per_env, gamma, gae_lambda, device):\n advantages = torch.zeros((n_envs, steps_per_env, 1), dtype=torch.float, device=device)\n last_advantage = 0\n for state in reversed(range(steps_per_env)):\n error = rewards[:, state] + gamma * values[:, state + 1] * (~done[:, state]) - values[:, state]\n last_advantage = (error + gamma * gae_lambda * last_advantage * (~done[:, state]))\n\n advantages[:, state] = last_advantage\n\n return advantages", "def _EMA(vec, win):\n\n # assert win>0 and win<len(vec), \"the size of EMA window is not allowed\"\n alpha = 2/(1+win)\n if win == 1 or len(vec) == 1:\n k = 1\n else: # why we need this?? 
-- to save memory using approximately \n err = 0.000001\n k = np.ceil(np.log(err) / np.log(1-alpha))\n\n N = int(min(len(vec), k))\n ema_series = [0 for _ in range(N)]\n ema_series[0] = vec[0]\n for i in range(1,N):\n ema_series[i] = alpha*vec[i] + (1-alpha)*ema_series[i-1]\n return ema_series", "def monte_carlo(env, V, policy, episodes=5000, max_steps=100,\n alpha=0.1, gamma=0.99):\n nS = env.observation_space.n\n discounts = np.logspace(0, max_steps, num=max_steps,\n base=gamma, endpoint=False)\n for i in range(episodes):\n # alpha = max(alpha * np.exp(-0.01 * i), 0.0)\n episode = generate_episode(env, policy, max_steps)\n return_visited = np.zeros(nS, dtype=bool)\n for step, (state, action, reward) in enumerate(episode):\n if return_visited[state]:\n continue\n return_visited[state] = True\n seq_len = len(episode[step:])\n G = np.sum(discounts[:seq_len] * episode[step:, 2])\n V[state] = V[state] + alpha * (G - V[state])\n return V", "def get_EG(vals):\n return get_tau_gap(vals)/2", "def ema(df, key, lag):\n\n def calc_ema(index, df, emas, mult):\n return round(((df[index][key] - emas[-1]) * mult) + emas[-1], 2)\n\n avg = moving_avg(df, key, lag)[0]\n multiplier = 2.0 / (lag + 1)\n\n emas = [avg]\n [emas.append(calc_ema(i, df, emas, multiplier)) for i in range(lag + 1, len(df))]\n return emas", "def delayE(self):\n sinE = np.sin(self.E())\n return self.GAMMA", "def _getExponentialValues(self, arr):\r\n return [math.exp(val) for val in arr]", "def compute_gae(V, s, ss, r, absorbing, last, gamma, lam):\n v = V(s)\n v_next = V(ss)\n gen_adv = np.empty_like(v)\n for rev_k in range(len(v)):\n k = len(v) - rev_k - 1\n if last[k] or rev_k == 0:\n gen_adv[k] = r[k] - v[k]\n if not absorbing[k]:\n gen_adv[k] += gamma * v_next[k]\n else:\n gen_adv[k] = r[k] + gamma * v_next[k] - v[k] + gamma * lam * gen_adv[k + 1]\n return gen_adv + v, gen_adv", "def expdecay_despiker(self, exponent=None, tstep=None):\n # if exponent is None:\n # if not hasattr(self, 'expdecay_coef'):\n # self.find_expcoef()\n # exponent = self.expdecay_coef\n if tstep is None:\n tstep = np.diff(self.Time[:2])\n if not hasattr(self, 'despiked'):\n self.data['despiked'] = {}\n for a, vo in self.focus.items():\n v = vo.copy()\n if 'time' not in a.lower():\n lowlim = np.roll(v * np.exp(tstep * exponent), 1)\n over = np.roll(lowlim > v, -1)\n\n if sum(over) > 0:\n # get adjacent values to over - limit values\n # calculate replacement values\n neighbours = []\n fixend = False\n oover = over.copy()\n if oover[0]:\n neighbours.append([v[1], np.nan])\n oover[0] = False\n if oover[-1]:\n oover[-1] = False\n fixend = True\n neighbours.append(np.hstack([v[np.roll(oover, -1)][:, np.newaxis],\n v[np.roll(oover, 1)][:, np.newaxis]]))\n if fixend:\n neighbours.append([v[-2], np.nan])\n\n neighbours = np.vstack(neighbours)\n\n replacements = np.apply_along_axis(np.nanmean, 1, neighbours)\n # and subsitite them in\n v[over] = replacements\n self.data['despiked'][a] = v\n self.setfocus('despiked')\n return", "def eady_growth_rate(data):\n N2 = ixr.brunt_vaisala(data)\n f = 2.0*omega*xruf.sin(xruf.deg2rad(data.lat))\n\n dz = ixr.domain.calculate_dz(data)\n du = ixr.domain.diff_pfull(data.ucomp, data)\n\n N = xruf.sqrt(N2.where(N2 > 0))\n\n egr = 0.31*du/dz*f/N\n return np.abs(egr)", "def get_epsilon_decay_function(e_start, e_end, decay_duration):\n return lambda frame_idx: e_end + \\\n (e_start - e_end) * np.exp(-1. 
* frame_idx / decay_duration)", "def _generator(self):\n # Initial setup\n ac = self._env.action_space.sample() # not used, just so we have the datatype\n self.new = True # marks if we're on first timestep of an episode\n self.ob = self._convert_state(self._env.reset()) \n T = self._timesteps\n\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n\n # Initialize history arrays\n #obs = np.array([None for _ in range(T)])\n obs = nd.empty((T,) + self._env.observation_space.shape)\n rews = np.zeros(T, 'float32')\n vpreds = np.zeros(T, 'float32')\n news = np.zeros(T, 'int32')\n acs = np.array([ac for _ in range(T)])\n prevacs = acs.copy()\n\n t = 0\n while True:\n ob = self.ob # Use `self.` since `_evaluate` may have reset the env\n new = self.new\n prevac = ac\n ac, vpred = self._act(ob)\n # NOTE(openAI) Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct terminal value\n if t > 0 and t % T == 0:\n seg = {\"ob\": obs, \"rew\": rews, \"vpred\": vpreds, \"new\": news,\n \"ac\": acs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\": np.array(copy.deepcopy(ep_rets)),\n \"ep_lens\": np.array(copy.deepcopy(ep_lens))}\n self._add_vtarg_and_adv(seg, self._gamma, self._lambda)\n yield seg\n # NOTE: Do a deepcopy if the values formerly in these arrays are used later.\n ep_rets = []\n ep_lens = []\n i = t % T\n\n obs[i] = ob[0]\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob, rew, new, _ = self._env.step(ac)\n ob = self._convert_state(ob)\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = self._convert_state(self._env.reset())\n self.new = new\n self.ob = ob\n t += 1", "def generate_returns(episode, gamma=0.9):\n len_episode = len(episode) # T = length of current episode\n epi_returns = np.zeros(len_episode)\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Representing immediate reward as a vector and\n # using a vector of powers of gamma along with `np.dot` will\n # make this much easier to implement in a few lines of code.\n # You don't need to use this approach however and use whatever works for you. 
#\n\n\n episode_array = np.array(episode)\n master_reward_vector = episode_array[:, 2] # vector containing all the rewards from the episode, [r1, r2, ..., rT]\n master_gamma_vector = np.power(gamma, np.arange(1, len(master_reward_vector)+1)) # [gamma^0, gamma^1, gamma^2, ..., gamma^T]\n # print(\"master_reward_vector {0}\".format(master_reward_vector))\n # print(\"master_gamma_vector {0}\".format(master_gamma_vector))\n \n for i in range (0, len_episode):\n reward_vector = master_reward_vector[i:] # vector containing the last i rewards, [rk-i, ..., rk-1, rk]\n gamma_vector = master_gamma_vector[0:len_episode-i] # vector containing the first i powers of gamma, [gamma^0, gamma^1, ..., gamma^i]\n # print(\"reward_vector {0}, dim {1}\".format(reward_vector, reward_vector.shape))\n # print(\"gamma_vector {0}, dim {1}\".format(gamma_vector, gamma_vector.shape))\n epi_returns[i] = np.dot(gamma_vector, reward_vector) # [rk-i, ..., rk-1, rk] DOT [gamma^0, gamma^1, ..., gamma^i]\n # print(\"epi_returns[{0}] {1}\".format(i, epi_returns[i]))\n \n ############################\n # print(\"epi_returns{0}\".format(epi_returns))\n return epi_returns", "def ricker(dt, pt):\n nt = int(2 * pt / dt)\n c = np.zeros(nt)\n t0 = pt / dt\n a_ricker = 4 / pt\n\n for it in range(0, nt):\n t = ((it + 1) - t0) * dt\n c[it] = -2 * a_ricker * t * math.exp(-(a_ricker * t) ** 2)\n\n return c", "def decay(time_, max_time, coeff):\n threshold = max_time - time_\n if threshold < 0:\n threshold = 0\n return 1 + threshold * coeff / max_time", "def main_gamma_ray_loop(\n num_decays,\n model,\n plasma,\n time_steps=10,\n time_end=80.0,\n grey_opacity=-1,\n spectrum_bins=500,\n time_space=\"log\",\n photoabsorption_opacity=\"tardis\",\n pair_creation_opacity=\"tardis\",\n seed=1,\n path_to_decay_data=\"~/Downloads/tardisnuclear/decay_radiation.h5\",\n positronium_fraction=0.0,\n):\n # Note: not best numpy practice, but works better in numba than the alternatives\n np.random.seed(seed)\n\n # Enforce cgs\n outer_velocities = model.v_outer.to(\"cm/s\").value\n inner_velocities = model.v_inner.to(\"cm/s\").value\n ejecta_density = model.density.to(\"g/cm^3\").value\n ejecta_volume = model.volume.to(\"cm^3\").value\n ejecta_velocity_volume = (\n 4 * np.pi / 3 * (outer_velocities**3.0 - inner_velocities**3.0)\n )\n time_explosion = model.time_explosion.to(\"s\").value\n number_of_shells = model.no_of_shells\n raw_isotope_abundance = model.raw_isotope_abundance.sort_values(\n by=[\"atomic_number\", \"mass_number\"], ascending=False\n )\n\n shell_masses = ejecta_volume * ejecta_density\n\n time_start = time_explosion\n time_end *= u.d.to(u.s)\n\n assert (\n time_start < time_end\n ), \"Error, simulation start time greater than end time!\"\n\n if time_space == \"log\":\n times = np.zeros(time_steps + 1)\n\n # log time steps\n for i in range(time_steps + 1):\n times[i] = (\n np.log(time_start)\n + (np.log(time_end) - np.log(time_start)) / time_steps * i\n )\n times[i] = np.exp(times[i])\n else:\n times = np.linspace(time_start, time_end, time_steps + 1)\n\n dt_array = np.diff(times)\n effective_time_array = np.array(\n [np.sqrt(times[i] * times[i + 1]) for i in range(time_steps)]\n )\n\n # Use isotopic number density\n for atom_number in plasma.isotope_number_density.index.get_level_values(0):\n values = plasma.isotope_number_density.loc[atom_number].values\n if values.shape[1] > 1:\n plasma.number_density.loc[atom_number] = np.sum(values, axis=0)\n else:\n plasma.number_density.loc[atom_number] = values\n\n # Calculate electron number 
density\n electron_number_density = (\n plasma.number_density.mul(plasma.number_density.index, axis=0)\n ).sum()\n\n electron_number_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n mass_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n electron_number = (electron_number_density * ejecta_volume).to_numpy()\n\n inv_volume_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n # Pre-calculate quantities as they change with time\n for i, t in enumerate(effective_time_array):\n inv_volume_time[:, i] = (1.0 / ejecta_velocity_volume) / (t**3.0)\n mass_density_time[:, i] = shell_masses * inv_volume_time[:, i]\n electron_number_density_time[:, i] = (\n electron_number * inv_volume_time[:, i]\n )\n\n energy_df_rows = np.zeros((number_of_shells, time_steps))\n\n # Calculate number of packets per shell based on the mass of isotopes\n number_of_isotopes = plasma.isotope_number_density * ejecta_volume\n total_number_isotopes = number_of_isotopes.sum(axis=1)\n\n inventories = raw_isotope_abundance.to_inventories()\n all_isotope_names = get_all_isotopes(raw_isotope_abundance)\n all_isotope_names.sort()\n\n gamma_ray_lines = get_nuclear_lines_database(path_to_decay_data)\n\n taus = {}\n parents = {}\n gamma_ray_line_array_list = []\n average_energies_list = []\n average_positron_energies_list = []\n\n for i, isotope in enumerate(all_isotope_names):\n nuclide = rd.Nuclide(isotope)\n taus[isotope] = nuclide.half_life() / np.log(2)\n child = nuclide.progeny()\n if child is not None:\n for c in child:\n if rd.Nuclide(c).half_life(\"readable\") != \"stable\":\n parents[c] = isotope\n\n energy, intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"g\",\n )\n gamma_ray_line_array_list.append(np.stack([energy, intensity]))\n average_energies_list.append(np.sum(energy * intensity))\n positron_energy, positron_intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"bp\",\n )\n average_positron_energies_list.append(\n np.sum(positron_energy * positron_intensity)\n )\n\n # Construct Numba typed dicts\n gamma_ray_line_arrays = {}\n average_energies = {}\n average_positron_energies = {}\n\n for iso, lines in zip(all_isotope_names, gamma_ray_line_array_list):\n gamma_ray_line_arrays[iso] = lines\n\n for iso, energy, positron_energy in zip(\n all_isotope_names, average_energies_list, average_positron_energies_list\n ):\n average_energies[iso] = energy\n average_positron_energies[iso] = positron_energy\n\n # urilight chooses to have 0 as the baseline for this calculation\n # but time_start may also be valid in which case decay time is time_end - time_start\n total_energy_list = []\n\n for shell, inv in enumerate(inventories):\n decayed_energy = {}\n total_decays = inv.cumulative_decays(time_end)\n for nuclide in total_decays:\n if nuclide in parents and nuclide != \"Co-56\" and nuclide != \"Co-57\":\n parent = parents[nuclide]\n if parent in parents:\n parent = parents[parent]\n decayed_energy[parent] += (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n else:\n decayed_energy[nuclide] = (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n\n total_energy_list.append(decayed_energy)\n\n total_energy = pd.DataFrame(total_energy_list)\n\n total_energy_columns = total_energy.columns.to_list()\n\n total_energy = 
total_energy[\n sorted(\n total_energy_columns, key=get_nuclide_atomic_number, reverse=True\n )\n ]\n\n energy_per_mass = total_energy.divide(\n (raw_isotope_abundance * shell_masses).T.to_numpy(),\n axis=0,\n )\n\n # Time averaged energy per mass for constant packet count\n average_power_per_mass = energy_per_mass / (time_end - time_start)\n\n energy_per_mass_norm = energy_per_mass.divide(\n energy_per_mass.sum(axis=1), axis=0\n ) # .cumsum(axis=1)\n\n decayed_packet_count = num_decays * number_of_isotopes.divide(\n total_number_isotopes, axis=0\n )\n\n packets_per_isotope = (\n (energy_per_mass_norm * decayed_packet_count.T.values)\n .round()\n .fillna(0)\n .astype(int)\n )\n\n print(\"Total gamma-ray energy\")\n print(total_energy.sum().sum() * u.keV.to(\"erg\"))\n\n print(\"Total positron energy\")\n print(total_energy[\"Co-56\"].sum(axis=0) * 0.0337 * u.keV.to(\"erg\"))\n\n # Taking iron group to be elements 21-30\n # Used as part of the approximations for photoabsorption and pair creation\n # Dependent on atomic data\n iron_group_fraction_per_shell = model.abundance.loc[(21):(30)].sum(axis=0)\n\n number_of_packets = packets_per_isotope.sum().sum()\n print(\"Total packets:\", number_of_packets)\n\n packet_energy = total_energy.sum().sum() / number_of_packets\n\n print(\"Energy per packet\", packet_energy)\n\n # Need to update volume for positron deposition to be time-dependent\n print(\"Initializing packets\")\n (\n packets,\n energy_df_rows,\n energy_plot_df_rows,\n energy_plot_positron_rows,\n ) = initialize_packets(\n packets_per_isotope,\n packet_energy,\n gamma_ray_line_arrays,\n positronium_fraction,\n inner_velocities,\n outer_velocities,\n inv_volume_time,\n times,\n energy_df_rows,\n effective_time_array,\n taus,\n parents,\n average_positron_energies,\n inventories,\n average_power_per_mass,\n )\n\n print(\"Total positron energy from packets\")\n print((energy_df_rows).sum().sum() * u.eV.to(\"erg\"))\n\n total_cmf_energy = 0\n total_rf_energy = 0\n\n for p in packets:\n total_cmf_energy += p.energy_cmf\n total_rf_energy += p.energy_rf\n\n print(\"Total CMF energy\")\n print(total_cmf_energy)\n\n # Below is the Artis compensation for their method of packet rejection\n \"\"\"\n energy_ratio = total_energy.sum().sum() / total_cmf_energy\n\n print(\"Energy ratio\")\n print(energy_ratio)\n \n for p in packets:\n p.energy_cmf *= energy_ratio\n p.energy_rf *= energy_ratio\n\n for e in energy_df_rows:\n e *= energy_ratio\n \n for row in energy_plot_df_rows:\n row[1] *= energy_ratio\n \"\"\"\n print(\"Total RF energy\")\n print(total_rf_energy)\n\n energy_bins = np.logspace(2, 3.8, spectrum_bins)\n energy_out = np.zeros((len(energy_bins - 1), time_steps))\n\n # Process packets\n (\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n deposition_estimator,\n ) = gamma_packet_loop(\n packets,\n grey_opacity,\n photoabsorption_opacity,\n pair_creation_opacity,\n electron_number_density_time,\n mass_density_time,\n inv_volume_time,\n iron_group_fraction_per_shell.to_numpy(),\n inner_velocities,\n outer_velocities,\n times,\n dt_array,\n effective_time_array,\n energy_bins,\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n )\n\n # DataFrame of energy information\n energy_plot_df = pd.DataFrame(\n data=energy_plot_df_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n \"energy_input_type\",\n \"compton_opacity\",\n \"photoabsorption_opacity\",\n \"total_opacity\",\n ],\n )\n\n # DataFrame of positron energies\n 
energy_plot_positrons = pd.DataFrame(\n data=energy_plot_positron_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n ],\n )\n\n # DataFrame of estimated deposition\n # Multiply dataframes by inv_volume_time array\n # if per unit volume is needed\n energy_estimated_deposition = (\n pd.DataFrame(data=deposition_estimator, columns=times[:-1])\n ) / dt_array\n\n # Energy is eV/s\n energy_df = pd.DataFrame(data=energy_df_rows, columns=times[:-1]) / dt_array\n\n final_energy = 0\n for p in packets:\n final_energy += p.energy_rf\n\n print(\"Final energy to test for conservation\")\n print(final_energy)\n\n escape_energy = pd.DataFrame(\n data=energy_out, columns=times[:-1], index=energy_bins\n )\n\n return (\n energy_df,\n energy_plot_df,\n escape_energy,\n decayed_packet_count,\n energy_plot_positrons,\n energy_estimated_deposition,\n )", "def model_growth_rate(t, a_0, omega):\n a = a_0 * np.exp(omega * t)\n return a", "def calc_core(points, eps_0, charge_from_dopants, Evac_minus_Ei, ni,\r\n tol=1e-5, max_iterations=inf, Evac_start=None, Evac_end=None):\r\n dx = points[1] - points[0]\r\n if max(np.diff(points)) > 1.001 * dx or min(np.diff(points)) < 0.999 * dx:\r\n raise ValueError('Error! points must be equally spaced!')\r\n if dx <= 0:\r\n raise ValueError('Error! points must be in increasing order!')\r\n \r\n num_points = len(points)\r\n \r\n # Seed for Evac\r\n seed_charge = np.zeros(num_points)\r\n Evac = [Evac_minus_EF_from_charge(Evac_minus_Ei[i], ni[i],\r\n charge_from_dopants[i], seed_charge[i])\r\n for i in range(num_points)]\r\n Evac = np.array(Evac)\r\n if Evac_start is not None:\r\n Evac[0] = Evac_start\r\n if Evac_end is not None:\r\n Evac[-1] = Evac_end\r\n\r\n ###### MAIN LOOP ######\r\n \r\n iters=0\r\n err=inf\r\n while err > tol and iters < max_iterations:\r\n iters += 1\r\n \r\n prev_Evac = Evac\r\n \r\n Evac = np.zeros(num_points)\r\n \r\n Evac[0] = prev_Evac[0]\r\n Evac[-1] = prev_Evac[-1]\r\n # Set Evac[i] = (prev_Evac[i-1] + prev_Evac[i+1])/2\r\n Evac[1:-1] = (prev_Evac[0:-2] + prev_Evac[2:])/2\r\n charge = local_charge(Evac_minus_Ei, ni, charge_from_dopants,\r\n Evac)['net_charge']\r\n Evac[1:-1] -= 0.5 * dx**2 * charge[1:-1] / (eps_0[1:-1]\r\n * eps0_in_e_per_cm3_over_V_per_nm2)\r\n \r\n err = max(abs(prev_Evac - Evac))\r\n\r\n if False:\r\n # Optional: graph Evac a few times during the process to see\r\n # how it's going.\r\n if 5 * iters % max_iterations < 5:\r\n plt.figure()\r\n plt.plot(points, prev_Evac, points, Evac)\r\n if iters == max_iterations:\r\n print('Warning! Did not meet error tolerance. Evac changed by up to ('\r\n + '{:e}'.format(err) + ')eV in the last iteration.' 
)\r\n else:\r\n print('Met convergence criterion after ' + str(iters)\r\n + ' iterations.')\r\n \r\n return Evac", "def add_gae(trajectories, gamma, lam):\n for trajectory in trajectories:\n if gamma < 0.999: # don't scale for gamma ~= 1\n rewards = trajectory['rewards'] * (1 - gamma)\n else:\n rewards = trajectory['rewards']\n values = trajectory['values']\n # temporal differences\n tds = rewards - values + np.append(values[1:] * gamma, 0)\n advantages = discount(tds, gamma * lam)\n trajectory['advantages'] = advantages", "def EMA(df, base, target, period, alpha=False):\n\n con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]])\n\n if (alpha == True):\n # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period\n df[target] = con.ewm(alpha=1 / period, adjust=False).mean()\n else:\n # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1)\n df[target] = con.ewm(span=period, adjust=False).mean()\n\n df[target].fillna(0, inplace=True)\n return df", "def rolling_mom(decay: float) -> _InitUpdate:\n\n def init_fn(p: Any) -> MomAccumulator:\n return MomAccumulator(\n m=jax.tree_util.tree_map(jnp.zeros_like, p),\n t=jnp.asarray(0, dtype=jnp.int32))\n\n def update_fn(state: MomAccumulator, grad: Any) -> MomAccumulator:\n m = jax.tree_util.tree_map(lambda a, b: decay * a + (1 - decay) * b,\n state.m, grad)\n return MomAccumulator(m=m, t=state.t + 1)\n\n return _InitUpdate(init_fn, update_fn)", "def exp(t,tau):\n return np.exp(-t/tau)", "def tracking(alm, emmArr, ellArr, lmax, trate, time):\n alm2 = alm.copy()\n for emm in range(lmax+1):\n index = np.where((emmArr == emm))[0]\n tomega = 1j * emm * trate\n alm2[index] *= np.exp(tomega)\n return alm2", "def vel2acc(timeseries, dt):\n return np.diff(np.hstack(([0], timeseries)) * (1.0 / dt))", "def rate(s=switch, e=early_mean, l=late_mean):\n out = np.empty(len(disasters_array))\n # Early mean prior to switchpoint\n out[:s] = e\n # Late mean following switchpoint\n out[s:] = l\n return out" ]
[ "0.571016", "0.5588797", "0.5566866", "0.5552068", "0.55310565", "0.55223566", "0.54889387", "0.5482954", "0.5337391", "0.53091055", "0.5293329", "0.5291754", "0.5280999", "0.5240395", "0.5240054", "0.5230036", "0.52182394", "0.5211412", "0.5211144", "0.52106285", "0.5179112", "0.5172109", "0.5164781", "0.51573557", "0.5139998", "0.5139221", "0.5127643", "0.5127469", "0.5116934", "0.5113462" ]
0.659793
0
Takes a list of cells, checks their birthtime and evaluates whether they live on based on their given lifetime.
def old_cells_die(celllist, tnow): survivors = [cell for cell in celllist if tnow - cell.birthtime <= cf.tlifeN] return survivors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def long_waiters_die(celllist, tnow):\n survivors = []\n for sublist in celllist:\n newsub = []\n for cell in sublist:\n if tnow - cell.GCentrytime <= cf.tlifeGC:\n newsub.append(cell)\n survivors.append(newsub)\n return survivors", "def _should_cell_live(self, cell: Cell) -> bool:\n living_neighbours_count = self._count_living_neighbors(cell)\n # Any live cell with two or three live neighbours survives\n if cell.is_alive and living_neighbours_count in [2, 3]:\n return True\n # Any dead cell with three live neighbours becomes a live cell\n if not cell.is_alive and living_neighbours_count == 3:\n return True\n # All other live cells die in the next generation. Similarly, all other dead cells stay dead\n return False", "def test_live_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n world.set_cell((0, 0))\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive", "def annual_death_all_animals(self):\n for cell in itertools.chain.from_iterable(self.map):\n if type(cell).__name__ in self.allowed_cells:\n cell.annual_death()", "def test_dead_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive", "def birth_check(self):\r\n if random.random() < 0.00017: # 0.0121, or 1.21%, is the yearly birth rate.\r\n birth_flag_list.append(1)\r\n # This makes the birth rate for every 5 days (73 'checks' a year) 0.00017%,\r\n # because 1 - 0.0121 = 0.9879; 98.79% is the chance of not giving birth that year.\r\n # 0.99983 ^73 = 0.9879 are the 5-day chances compounded 73 times, and 1 - 0.99983 = 0.00017.\r\n # or you could use the yearly birth rate and have birth_check only occur randomly\r\n # around once a year.\r\n if birth_flag_list != [] and self.gender == 2 and self.marriage == 1 and self.age < 55:\r\n if self.last_birth_time >= 2: # 2 years is the set birth interval; can modify\r\n self.last_birth_time = 0 # reset counter\r\n birth_flag_list.remove(1)\r\n last = self.model.number_of_humans\r\n # build more attributes\r\n age = 0\r\n gender = random.choice([1, 2])\r\n education = 0\r\n work_status = 0\r\n marriage = 0\r\n if gender == 1:\r\n age_category = 0\r\n elif gender == 2:\r\n age_category = 1\r\n ind = Human(last + 1, self.model, self.current_position, self.hh_id, age, self.resource_check,\r\n self.home_position, self.resource_position, self.resource_frequency, gender,\r\n education, work_status, marriage, self.past_hh_id, self.mig_years,\r\n self.migration_status, self.gtgp_part, self.non_gtgp_area,\r\n self.migration_network, self.mig_remittances, self.income_local_off_farm,\r\n self.last_birth_time, self.death_rate, age_category)\r\n self.model.schedule.add(ind)\r\n self.model.number_of_humans += 1\r\n hh_size_list[self.hh_id] += 1\r\n human_birth_list.append(last + 1)\r\n if ind.gender == 1:\r\n human_demographic_structure_list[0] += 1\r\n elif ind.gender == 2:\r\n human_demographic_structure_list[10] += 1", "def cells_enter_GCs(GC_waiting, celllist, tnow, RIs):\n for cell in celllist:\n # get a random GC for entry\n GCpos = RIs.getR()\n # set entrytnow into the waiting area and new position\n cell.GCentrytime = tnow\n cell.AIDstart = tnow\n # add cell to correct waitlist\n GC_waiting[GCpos].append(cell)\n\n return GC_waiting", "def island_deaths(self):\n for y in self.island_map:\n for cell in y:\n cell.deaths()", "def updateCells(cell_positions):\n # Build a set of 
canditates for live cells at the next generation, instead of looking through the whole grid\n # These will be dead neighbours of living cells\n possible_future_cells = set()\n # Make sets of cells to add and remove at the end of the check\n cells_remove = set()\n cells_add = set()\n for cell in cell_positions:\n # Get adjacent squares\n neighbours_dict = cellNeighbours(cell)\n number_live_neighbours = 0\n # Check which of these corresponds to another living cell\n for square in neighbours_dict.values():\n if square in cell_positions:\n number_live_neighbours+=1\n else:\n possible_future_cells.add(square)\n\n # Any live cell with fewer than two live neighbours dies, as if caused by under-population\n if number_live_neighbours<2:\n cells_remove.add(cell)\n # Any live cell with two or three live neighbours lives on to the next generation\n # do nothing\n # Any live cell with more than three live neighbours dies, as if by overcrowding\n elif number_live_neighbours>3:\n cells_remove.add(cell)\n # Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction\n for cell_candidate in possible_future_cells:\n cell_candidate_neighbours = cellNeighbours(cell_candidate).values()\n # Count number of live neighbours\n count = 0\n for square in cell_candidate_neighbours:\n if square in cell_positions:\n count+=1\n if count == 3:\n cells_add.add(cell_candidate)\n # Update cell_positions by removing dead cells and adding new-born cells\n for cell in cells_add:\n cell_positions.add(cell)\n for cell in cells_remove:\n cell_positions.remove(cell)\n # Return the update live cell list\n return cell_positions", "def gameOfLife(self, board: List[List[int]]) -> None:\n # copy matrix\n copy_matrix = [[board[row][col] for col in range(len(board[0]))] for row in range(len(board))]\n \n # 8 possible directions\n directions = [(0,1), (0, -1), (1,0), (-1,0), (-1,-1), (1,1), (1,-1), (-1,1)]\n num_rows = len(board)\n num_cols = len(board[0])\n \n # matrix traversal\n for i in range(0, num_rows):\n for j in range(0, num_cols):\n # for each cell, explore all of its neighboring cells\n num_live_cells = 0\n for direction in directions:\n r = i + direction[0]\n c = j + direction[1]\n # make sure if it is a live cell \n if (r < num_rows and r >=0) and (c < num_cols and c>=0) and (copy_matrix[r][c]==1):\n # if it is live cell, increment live_cell_count\n num_live_cells +=1\n # if here: We now have estimate of surrounding live cells\n # start applying rules \n # Rule-1: Any live cell with fewer than 2 live neighbors die\n # Rule-2: Any live cell with 2/3 live neighbors live up\n # Rule-3: Any Live cell with > 3 live neighbors die\n # Rule-4: Any dead cell with ==3 live neighbors becomes alive\n if copy_matrix[i][j] == 1 and (num_live_cells > 3 or num_live_cells < 2):\n # Rule-1 and Rule-3: So the current cell dies...\n board[i][j] = 0\n if copy_matrix[i][j] == 0 and num_live_cells == 3:\n # Rule-4: Dead becomes alive\n board[i][j] = 1\n # Rule-2 is taken care by default.", "def age_check(self):\r\n # check working status\r\n if 15 <= float(self.age) < 59:\r\n if self.work_status == 0:\r\n self.work_status = 1\r\n num_labor_list[self.hh_id] += 1\r\n labor_list.append(self.unique_id)\r\n if self.work_status == 1 and self.unique_id not in labor_list:\r\n labor_list.append(self.unique_id)\r\n else:\r\n self.work_status = 0\r\n\r\n # check education status; measured in years of education\r\n if 7 <= int(self.age) <= 19:\r\n if random.random() > 0.1:\r\n self.education += 1\r\n # most adults in the FNNR 
did not get a full 12-13 years of education\r\n elif 19 < float(self.age) < 23 and self.migration_status == 1:\r\n if random.random() < 0.5:\r\n self.education += 1 # went to college and got further education\r\n # this is rare; in the household list, a few received beyond 12 years of education\r\n\r\n # check age-based death rates\r\n if self.age > 65:\r\n self.death_rate = 0.001443 # 5-day death rate\r\n # The average death rate in China is 7.3 per 1,000 people/year, or 0.0073 (Google).\r\n # However, death rates should be higher for the elderly, or else the population structure will skew.\r\n # I set death rates for those over age 65 to be 10% per year--0.9 yearly survival rate.\r\n # The survival rate for each 5-day step is compounded 73 times, so x^73 = 0.85.\r\n # 0.998557 is the 5-day survival rate, and 1 - x is the 5-day death rate.\r\n else:\r\n self.death_rate = 0.00000425\r\n # I wanted people to have a 98% chance of reaching age 65 (death rate is lower if not elderly).\r\n # If a 'check' is every 5 days, 73 checks/year * 65 years = 4,745 checks.\r\n # x^4745 = 0.98; the 5-day survival rate is 0.99999575, and 1 - x is the 5-day death rate.\r\n\r\n # These rates are changeable later.\r", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n mat = [row[:] for row in board] #original copy of the board\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in directions:\n if valid(i+direc[0],j+direc[1]):\n if mat[i+direc[0]][j+direc[1]]==1:\n cnt_live+=1\n if mat[i][j]==1 and cnt_live<2 or mat[i][j]==1 and cnt_live>3:\n board[i][j]=0\n elif mat[i][j]==1 and 2<=cnt_live<=3 or mat[i][j]==0 and cnt_live==3:\n board[i][j]=1", "def gameOfLife(self, board: List[List[int]]) -> None:\n changes = list()\n for i in range(len(board)):\n for j in range(len(board[0])):\n neighbor_data = {\n 'live': 0,\n 'dead': 0\n }\n checks = {(0,1), (0,-1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, 1), (-1,-1)}\n if i == 0:\n checks.discard((-1, 0))\n checks.discard((-1, 1))\n checks.discard((-1, -1))\n if j == 0:\n checks.discard((0, -1))\n checks.discard((-1, -1))\n checks.discard((1, -1))\n if i == (len(board) - 1):\n checks.discard((1,0))\n checks.discard((1,-1))\n checks.discard((1, 1))\n if j == (len(board[0]) - 1):\n checks.discard((0, 1))\n checks.discard((-1, 1))\n checks.discard((1, 1))\n for check in checks:\n if board[i + check[0]][j + check[1]]:\n neighbor_data['live'] += 1\n else:\n neighbor_data['dead'] += 1\n if board[i][j]:\n # check live rules\n if neighbor_data['live'] < 2 or neighbor_data['live'] > 3:\n changes.append((i, j))\n else:\n # check dead rules\n if neighbor_data['live'] == 3:\n changes.append((i, j))\n for change in changes:\n board[change[0]][change[1]] = int (not board[change[0]][change[1]])\n \n print (board)", "def gameOfLife(self, board):\n \n # Neighbours array for 8 neighboring cells of a given cell\n neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n \n rows = len(board)\n cols = len(board[0])\n \n # Iterate through the board by each cell\n for row in range(rows):\n for col in range(cols):\n \n # For each cell counting number of live neighbors\n live_neighbors = 0\n for neighbor in neighbors:\n \n # row and column of neighboring cell\n r = (row + 
neighbor[0])\n c = (col + neighbor[1])\n \n # Checking validity of neighboring cell and if it was originally a live cell\n if(r < rows and r >= 0) and (c < cols and c >= 0) and abs(board[r][c]) == 1:\n \n live_neighbors += 1\n \n # Rule 1 or Rule 3\n if board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):\n \n board[row][col] = -1 # -1 meaning cell is now dead but was originally live\n \n # Rule 4\n if board[row][col] == 0 and live_neighbors == 3:\n board[row][col] = 2 #2 meaning cell is now live but was originally dead\n # Get final representation for updated board \n for row in range(rows):\n for col in range(cols):\n \n if board[row][col] > 0:\n board[row][col] = 1\n \n else:\n board[row][col] = 0", "def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True", "def gameOfLife(self, board: List[List[int]]) -> None:\n if not board or len(board)==0:\n return \n\n rows = len(board)\n cols = len(board[0])\n #lives = 0\n \n\n for i in range(rows):\n for j in range(cols):\n lives = self.n_neighbors(board,i,j)\n \n # Rule 1 and Rule 3\n if board[i][j]==1 and (lives <2 or lives >3):\n board[i][j]= 2 # -1 signifies the cell is now dead but originally was live.\n if board[i][j]== 0 and lives ==3:\n board[i][j]=3 # signifies the cell is now live but was originally dead.\n\n for i in range(rows):\n for j in range(cols):\n board[i][j] = board[i][j]%2\n return board", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in directions:\n if valid(i+direc[0],j+direc[1]):\n if board[i+direc[0]][j+direc[1]]==1 or board[i+direc[0]][j+direc[1]]==-1:\n cnt_live+=1\n if (board[i][j]==1 and cnt_live<2) or \\\n (board[i][j]==1 and cnt_live>3):\n board[i][j]=-1\n elif board[i][j]==0 and cnt_live==3:\n board[i][j]=2\n for i in range(m):\n for j in range(n):\n if board[i][j]==-1:\n board[i][j]=0\n elif board[i][j]==2:\n board[i][j]=1", "def live_or_die(self, x, y):\n neighbors = self.get_neighbors(x, y)\n num_neighbors = 0\n for val in neighbors:\n if val:\n num_neighbors+=1\n\n\n # cell dies if less than 2 neighbors\n if num_neighbors < 2:\n return False\n\n # cell lives on if has 2 or 3 neighbors\n if (num_neighbors == 2 or num_neighbors == 3) and self._board[x][y]:\n return True\n\n # cell dies if more than 2 neighbors\n if num_neighbors > 3:\n return False\n\n # cell is born if has 3 neighbors\n if num_neighbors == 3 and not self._board[x][y]:\n return True\n\n # for consistency\n return False", "def cell_create(game_set, screen, covids, cells):\n cell_create_flag = True\n cell = Cell(game_set, screen)\n for old_cell in cells.sprites():\n if old_cell.rect.y < game_set.cell_number_adjust:\n cell_create_flag = False\n break\n if (not pygame.sprite.spritecollide(cell, cells, 0) 
and\n not pygame.sprite.spritecollide(cell, covids, 0) and\n cell_create_flag):\n cells.add(cell)", "def is_alive(self, cell: Position) -> bool:\n return cell in self._state", "def noBigamy(individual):\n\n\n families = gedcom_parser.get_families(individual)\n\n marriageDateRanges = []\n for family in families:\n marriageDate = None\n divorceDate = None\n for element in family.get_child_elements():\n if element.get_tag() == \"MARR\":\n marriageDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if element.get_tag() == \"DIV\":\n divorceDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if divorceDate == None:\n divorceDate = dt.now()\n\n marriageDateRanges.append((marriageDate, divorceDate))\n \n marriageDateIntervals = pandas.arrays.IntervalArray.from_tuples(marriageDateRanges)\n\n\n if marriageDateIntervals.is_non_overlapping_monotonic:\n return True\n else:\n print(\n f\"Error US11: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs during another marriage\")\n return False", "def allValidCollectors():\n now = int(time.time())\n for col in allCollectors():\n if not col.dead or (now - col.lastspawn > 3600):\n yield col", "def check_bday(self):\n for fam in self.families.values():\n if fam.children != 'NA':\n # fam.children is either a set or 'NA' string\n for child in fam.children:\n bday = self.individuals[child].birthday\n marr = fam.married\n div = fam.divorced\n\n # if child is born before marriage date, and not yet divorced\n if marr != 'NA' and bday < marr and div == 'NA':\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n # if child is born more than 9 months after divorce\n if div != 'NA' and bday > div + relativedelta(months=9):\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n\n if fam.husb_id and fam.wife_id:\n dad = self.individuals[fam.husb_id]\n mom = self.individuals[fam.wife_id]\n # if child is born any time after mother dies\n if not mom.alive and mom.death < bday:\n print(f'US09 - {self.individuals[child].name} birthday after mom death date on line {self.individuals[child]._birthday_line}')\n # if child dies later than nine months after father dies\n if not dad.alive and dad.death + relativedelta(months=9) < bday:\n print(f'US09 - {self.individuals[child].name} birthday after dads death date on line {self.individuals[child]._birthday_line}')", "def __call__(self, time):\n for cell in self._population.cells:\n for person in cell.persons:\n if (hasattr(person, 'quarantine_start_time')) and (\n person.quarantine_start_time is not None):\n if time > person.quarantine_start_time + self.\\\n quarantine_duration:\n # Stop quarantine after quarantine period\n person.quarantine_start_time = None\n\n if (hasattr(person, 'isolation_start_time')) and (\n person.isolation_start_time == time):\n # Require household of symptomatic/isolating individuals to\n # quarantine with given household compliance and individual\n # compliance. Only check when infector starts its isolation\n # in order to prevent resetting. 
Start time is reset when\n # new person in household becomes an infector.\n r_house = random.random()\n if r_house < self.quarantine_house_compliant:\n for household_person in person.household.persons:\n if household_person != person:\n r_indiv = random.random()\n if r_indiv < \\\n self.quarantine_individual_compliant:\n household_person.\\\n quarantine_start_time = \\\n time + self.quarantine_delay", "def gameOfLife(self, board: list[list[int]]) -> None:\n def game_of_life_infinite(live: set[tuple[int, int]]) -> set[tuple[int, int]]:\n ctr = Counter((I, J)\n for i, j in live\n for I in range(i - 1, i + 2)\n for J in range(j - 1, j + 2)\n if I != i or J != j)\n return {ij\n for ij in ctr\n if ctr[ij] == 3 or ctr[ij] == 2 and ij in live}\n\n live = {(i, j) for i, row in enumerate(board)\n for j, live in enumerate(row) if live}\n live = game_of_life_infinite(live)\n for i, row in enumerate(board):\n for j in range(len(row)):\n row[j] = int((i, j) in live)", "def life_step(state):\n\t# For every cell each live cell in any of the 8 neighbouring cells contributes 1 to the sum\n\t# Rolling matricies is periodic so this implements periodic boundary conditions\n\tnumberOfNeigbours = sum(np.roll(np.roll(state, i, axis=0), j, axis=1)\n\t\t\t\t\t\t for i in (-1,0,1) for j in (-1,0,1) if (i != 0 or j != 0))\n\n\t# Any live cell with fewer than two live neighbours dies, as if caused by under-population\n\tstate = np.where(numberOfNeigbours < 2, 0, state)\n\t# Any live cell with more than three live neighbours dies, as if by over-population\n\tstate = np.where(numberOfNeigbours > 3, 0, state)\n\t# Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.\n\tstate = np.where(numberOfNeigbours == 3, 1, state)\n\n\treturn state", "def gameOfLife(self, board) :\n # mark live-->dead (-1)\n # mark live-->live (1)\n # mark dead-->live (2)\n # mark dead-->dead (0)\n\n h = len(board)\n w = len(board[0])\n\n def counter(i,j):\n c=0\n for m in range(-1,2):\n for n in range(-1,2):\n if i+m<0 or j+n <0 :\n continue\n if i+m>h-1 or j+n>w-1:\n continue\n else:\n if board[i+m][j+n]==1 or board[i+m][j+n]==-1:\n c+=1\n return c\n\n for i in range(h):\n for j in range(w):\n live=counter(i,j)\n if board[i][j] ==1:\n live=live-1\n if live<2 or live>3:\n board[i][j]=-1\n else:\n if live==3:\n board[i][j]=2\n for i in range(h):\n for j in range(w):\n if board[i][j]==2:\n board[i][j]=1\n if board[i][j]==-1:\n board[i][j]=0", "def gameOfLife(self, board: List[List[int]]) -> None:\n neighbors = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1),\n (0, 1), (1, 1)]\n rows = len(board)\n cols = len(board[0])\n\n tmp_board = [[board[r][c] for c in range(cols)] for r in range(rows)]\n\n for row in range(rows):\n for col in range(cols):\n lives = 0\n for n in neighbors:\n r = row + n[0]\n c = col + n[1]\n\n if 0 <= r < rows and 0 <= c < cols and tmp_board[r][c] == 1:\n lives += 1\n if tmp_board[row][col] == 1 and (lives < 2 or lives > 3):\n board[row][col] = 0\n if tmp_board[row][col] == 0 and lives == 3:\n board[row][col] = 1", "def check_contract_expire_soon():\n\n contract_expire_soon_list = []\n contract_expired_list = []\n\n # get user contract\n # refactoring techniques: replace temp with query\n user_role = get_user_role()\n contract_list = user_role.user_contracts\n\n for contract in contract_list:\n if contract['dateSigned'] and not contract['terminationDate']:\n\n # get expiry date and current date\n expiry_date = datetime.strptime(contract['expiryDate'][:19], \"%Y-%m-%dT%H:%M:%S\")\n 
current_time = datetime.now()\n \n # get the diffenrence between expiry date and current date\n difference = expiry_date - current_time\n days = divmod(difference.days, 86400)\n\n # Refactoring techniques: composing method\n contract_expire_soon = (days[1] <= 31) and (days[1] >= 0)\n contract_expired = days[0] < 0\n\n if contract_expire_soon:\n contract_expire_soon_list.append(contract)\n if contract_expired:\n contract_expired_list.append(contract)\n \n # return True if there's elem in any list, else False\n if len(contract_expire_soon_list) >= 1 or len(contract_expired_list) >= 1:\n return True, contract_expire_soon_list, contract_expired_list\n else:\n return False, contract_expire_soon_list, contract_expired_list", "def make_naive(RNs, seq_list, AgEpitope, tnow):\n # pick a random sequence from the pregenerated pool\n ab = random.choice(seq_list)\n Emax = E_best(ab, AgEpitope)\n if tnow == 0: # in initialisation, distribute ages evenly over\n # lifespan\n birthtime = -np.round(RNs.getR() * cf.tlifeN)\n else:\n birthtime = tnow\n newcell = Bcell(sequence=ab, sequence0=ab, affinity=Emax, affinity0=Emax,\n origin='naive', mutations=0,\n family=None, birthtime=birthtime,\n GCentrytime=None,\n AIDstart=None, block=False)\n return newcell" ]
[ "0.62469876", "0.61972153", "0.586499", "0.56494385", "0.5582217", "0.55195665", "0.55055493", "0.54996973", "0.54730326", "0.5453139", "0.5450774", "0.5255261", "0.52387965", "0.5236539", "0.5226822", "0.5223596", "0.5202929", "0.5185039", "0.5181881", "0.5146562", "0.5142961", "0.5116417", "0.51052886", "0.50959015", "0.50687224", "0.50423056", "0.49991238", "0.49935058", "0.49892572", "0.4984705" ]
0.71631736
0
Takes the list of sorted waiting lists and removes cells that have spent more than the allowed time period waiting for a survival signal.
def long_waiters_die(celllist, tnow): survivors = [] for sublist in celllist: newsub = [] for cell in sublist: if tnow - cell.GCentrytime <= cf.tlifeGC: newsub.append(cell) survivors.append(newsub) return survivors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_dead_obstacles(obstacle_list):\n\tfor obstacle in obstacle_list:\n\t\tobstacle.lifetime -= 1\n\t\tif obstacle.lifetime == 0:\n\t\t\tobstacle_list.remove(obstacle)\n\t\tprint(obstacle)", "def old_cells_die(celllist, tnow):\n survivors = [cell for cell in celllist\n if tnow - cell.birthtime <= cf.tlifeN]\n return survivors", "def purge_outlying_trials(self, trial_nums, thresh=5.0):\n for injkey in self.values.keys():\n for fit_key in self.values[injkey].keys():\n points = np.array(self.values[injkey][\n fit_key]['metric_val']['vals'])\n if len(points.shape) == 1:\n points = points[:, None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n modified_z_score = 0.6745 * diff / med_abs_deviation\n good_trials = modified_z_score < thresh\n if not np.all(good_trials):\n bad_trials = np.where(not good_trials)[0]\n logging.warning(\n 'Outlier(s) detected for %s in trial(s) %s. Will be '\n 'removed. If you think this should not happen, please '\n 'change the value of the threshold used for the '\n 'decision (currently set to %.2e).'%(\n fit_key, trial_nums[bad_trials], thresh\n )\n )\n for fitkey in self.values[injkey].keys():\n for param in self.values[injkey][fitkey].keys():\n new_vals = np.delete(\n np.array(self.values[injkey][\n fitkey][param]['vals']),\n bad_trials\n )\n self.values[injkey][\n fitkey][param]['vals'] = new_vals", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def prune_out_of_date_queuing_delays(self, global_clock_sec):\n # Replacement deque\n new_queuing_delays = deque()\n for exit_time, queuing_delay in list(self.queuing_delays):\n time_since_exit = global_clock_sec - exit_time\n assert time_since_exit >= 0.0\n\n if time_since_exit <= LinkBuffer.MAX_DATA_AGE_SEC:\n new_queuing_delays.append((exit_time, queuing_delay))\n\n self.queuing_delays = new_queuing_delays", "def agents_cleanup(agents, n) -> set:\n return set(agent for agent in agents if agent[0] < n and agent[1] < n)", "def unsend_scheduled_messages_after(self, time_cutoff):\n for user_id in self.user_id_to_scheduled_message_ts:\n for scheduled_ts in list(self.user_id_to_scheduled_message_ts[user_id]):\n if scheduled_ts >= time_cutoff:\n # The below if statement is likley redundant\n if scheduled_ts in self.user_id_to_scheduled_message_ts[user_id]:\n self.user_id_to_scheduled_message_ts[user_id].remove(scheduled_ts)", "def remove_less_than_treshold(potential_blockings, logger, dashboard_log):\n treshold = get_treshold(logger, dashboard_log)\n # create a new list containing all hosts with security value higher than treshold\n # inspired by https://stackoverflow.com/a/1207461\n return [host for host in potential_blockings if not get_average_security_value(host, logger) < treshold]", "def expired_alarm():\n temp_events = Events_list.copy()\n for x in range(len(temp_events)):#iterates for the whole events list\n if time.time() >= convert_to_epoch(temp_events[x][1]):#if the time set is less than current time it must be expired\n event_remove(temp_events[x][0])", "def free(range_lst, range_start, range_end, user_start, user_end):\n \n # Attempt to calculate range to subtract times from\n minute_range = []\n # range_start = arrow.get(range_start, \"MM/DD/YYYY hh:mm A\")\n # range_start_format = range_start.format(\"MM/DD/YYYY hh:mm A\")\n # range_end = arrow.get(range_end, \"MM/DD/YYYY 
hh:mm A\")\n # range_end_format = range_end.format(\"MM/DD/YYYY hh:mm A\")\n\n # Calculate range of minutes between potential start and end given by event creator\n minute_range = []\n for r in arrow.Arrow.range(\"minute\", range_start, range_end):\n minute_range.append(r)\n\n # Attempt to calculate user range of busy times\n try:\n user_start = arrow.get(user_start, \"MM/DD/YYYY hh:mm A\")\n user_end = arrow.get(user_end, \"MM/DD/YYYY hh:mm A\")\n\n user_range = arrow.Arrow.range(\"minute\", user_start, user_end)\n except:\n logger.info(\"MODULE 'free_times' FUNCTION 'free' -- Can't calculate USER range using {} - {}\".format(user_start, user_end))\n # Return empty list on fail\n return []\n\n # Subtract times from user_range from the general minute_range\n for time in user_range:\n if time in minute_range:\n index = minute_range.index(time)\n # None type will be used to generate range in flask_main find_busy_times\n minute_range[index] = None\n \n return minute_range", "def illegal_parallel_intervals(a_list, b_list):\n allowed_parallel_intervals = ['3', '6']\n consecutives = parallel_motion(a_list, b_list)\n\n return [\n c for c in consecutives\n if c[0][0][0] not in allowed_parallel_intervals\n ]", "def schedule_offloading(\n rp_boxes: List[np.ndarray]\n) -> List[np.ndarray]:\n return prioritize_larger_rp(rp_boxes)", "def available_processes(processes, time):\n return filter(lambda x: ((x['arrival_time'] <= time) and (x['remaining_time'] > 0)), processes)", "def removefutures(self, badstatuslist=['cancelled', 'error', 'lost'],\n keep=False):\n\n if isinstance(badstatuslist, str):\n badstatuslist = [badstatuslist]\n\n removed = 0\n for scanId in self.futures:\n # create list of futures (a dict per segment) that are cancelled\n\n removelist = [(seg, data, cc, acc)\n for (scanId0, futurelist) in iteritems(self.futures)\n for seg, data, cc, acc in futurelist\n if ((data.status in badstatuslist) or\n (cc.status in badstatuslist) or\n (acc.status in badstatuslist)) and\n (scanId0 == scanId)]\n\n self.errors[scanId] += len(removelist)\n for removefuts in removelist:\n (seg, data, cc, acc) = removefuts\n logger.warn(\"scanId {0} segment {1} bad status: {2}, {3}, {4}\"\n .format(scanId, seg, data.status, cc.status,\n acc.status))\n\n # clean them up\n errworkers = [(fut, self.client.who_has(fut))\n for futs in removelist\n for fut in futs[1:] if fut.status == 'error']\n errworkerids = [(fut, self.workernames[worker[0][0]])\n for fut, worker in errworkers\n for ww in listvalues(worker) if ww]\n for i, errworkerid in enumerate(errworkerids):\n fut, worker = errworkerid\n logger.warn(\"Error on workers {0}: {1}\"\n .format(worker, fut.exception()))\n\n for futures in removelist:\n self.futures[scanId].remove(futures)\n removed += 1\n\n if keep:\n if scanId not in self.futures_removed:\n self.futures_removed[scanId] = []\n self.futures_removed[scanId].append(futures)\n\n if removed:\n logger.warn(\"{0} bad jobs removed from scanId {1}\".format(removed,\n scanId))\n\n return removed", "def remove_premature_departures(trips):\n\t# sort ascending by arrival \n\t# then iteratively remove trips not also sorted by departure\n\tstarting_length = len(trips) # for logging\n\t#\n\ttrips.sort(key = lambda x: x.arrive_ts) # arrival, first to last\n\ti = 1\n\twhile i < len(trips):\n\t\t# if departure is before that of earlier-arriving trip\n\t\tif trips[i].depart_ts <= trips[i-1].depart_ts: \n\t\t\ttrips.pop(i)\n\t\t\tcontinue\n\t\ti+=1\n\t# there should be no simultaneous departures\n\tassert 
len(set([t.depart_ts for t in trips])) == len(trips)", "def hole_cleanup(atom_list): \n joey = atom_list.copy()\n while (len(joey) != 0):\n for atom in joey:\n takein = [atom]\n source_update = takein.copy()\n check = 1\n while (check == 1):\n source = source_update.copy()\n source_update = []\n c = len(takein)\n for element in source:\n bonds = [bond[0] for bond in identify_bonds(element, joey) if bond[0] not in takein]\n for h in bonds:\n takein.append(h)\n source_update.append(h)\n if ((len(takein) == c) and (len(takein) < 6)):\n check = 0\n for element in takein:\n atom_list.remove(element)\n elif (len(takein) == c):\n check = 0\n for element in takein:\n joey.remove(element)\n return atom_list", "def remove_expired(cls):\n max_trailers = 10\n current_trailers = cls.get_all(collection='approved_trailers')\n current_trailers.reverse()\n queued_trailers = cls.get_all(collection='queued_trailers')\n\n if len(current_trailers) >= max_trailers and len(queued_trailers) > 0:\n for trailer in current_trailers:\n time_active = trailer.date.timetuple().tm_yday - datetime.now().timetuple().tm_yday\n if time_active >= 14 and len(queued_trailers) > 0:\n cls.move(trailer, 'approved_trailers', 'archived_trailers')\n cls.move(queued_trailers[0], 'queued_trailers', 'approved_trailers')", "def freeBlackList():\n try:\n while True:\n sleep(FREE_BLACK_LIST)\n t = datetime.now()\n black_list_mutex.acquire()\n black_list_copy = dict(black_list)\n black_list_mutex.release()\n for blackIp in black_list_copy:\n if (t - black_list_copy[blackIp]).total_seconds() > SECONDS_TO_FREE:\n black_list_mutex.acquire()\n del black_list[blackIp]\n black_list_mutex.release()\n\n except:\n print(\"error in free black list ip\")\n finally:\n if black_list_mutex.locked():\n black_list_mutex.release()\n sys.exit()", "def notify_waiting_planes(self, curr_time):\n if self.cnt_waiting_to_land > 0:\n assert self.cnt_waiting_to_land == len(self.q_waiting_to_land)\n self.cnt_runways_in_use += 1\n self.cnt_waiting_to_land -= 1\n pending_event = self.q_waiting_to_land.pop()\n assert pending_event.type == EventType.PLANE_ARRIVES\n assert curr_time >= pending_event.time\n self.total_waiting_time_for_landing = curr_time - pending_event.time\n nxt_event_tuple = (EventType.PLANE_LANDED, curr_time+conf.runway_time_to_land, self.id)\n self.sim.schedule(nxt_event_tuple)\n elif self.cnt_waiting_to_depart > 0:\n assert self.cnt_waiting_to_depart == len(self.q_waiting_to_depart)\n self.cnt_runways_in_use += 1\n self.cnt_waiting_to_depart -= 1\n pending_event = self.q_waiting_to_depart.pop()\n assert pending_event.type == EventType.READY_FOR_TAKEOFF\n assert curr_time >= pending_event.time\n self.total_waiting_time_for_departing = curr_time - pending_event.time\n nxt_event_tuple = (EventType.PLANE_DEPARTS, curr_time+conf.runway_time_to_takeoff, self.id)\n self.sim.schedule(nxt_event_tuple)", "def checkAmountOfNeighbors(self):\n cellsToDelete = []\n for cell in self.cells:\n if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)):\n cellsToDelete.append(cell)\n elif(cell.numOfNeighbor == 3 and cell.dead == True):\n cell.makeAlive()\n cell.numOfNeighbor = 0\n\n self.removeCells(cellsToDelete)", "def __remove_expired_freezers(self, event: Event):\n if len(self.__freeze_map) == 0:\n # freeze option disabled\n return False\n self.__active_freezers = [freezer for freezer in self.__active_freezers\n if event.max_timestamp - freezer.min_timestamp <= self._pattern.window]", "def 
remove_wearables_without_valid_days(self):\n mark_for_removal = []\n for wearable in self.wearables.values():\n valid_days = self.get_valid_days(wearable.get_pid())[wearable.get_pid()]\n if len(valid_days) == 0:\n mark_for_removal.append(wearable.get_pid())\n\n for pid in mark_for_removal:\n print(\"Removing wearable %s.\" % pid)\n self.remove_wearable(pid)\n\n return len(mark_for_removal)", "def free_slots(self, day_bounds: Slot):\n free_slots: List[Slot] = []\n time_ptr = day_bounds.start\n for meeting in self.meetings:\n if meeting.start > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, meeting.start.time_str))\n time_ptr = meeting.end\n if day_bounds.end > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, day_bounds.end.time_str))\n return free_slots", "def _prune_candidates(self, beam_width=None):\n if beam_width is None:\n beam_width = self.beam_width\n if len(self.candidates) <= beam_width:\n return\n neg_scores = np.array([-cand.logp_total() for cand in self.candidates])\n parted_indices = np.argpartition(neg_scores, beam_width - 1)\n self.candidates = np.array(self.candidates)[parted_indices[:beam_width]].tolist()", "def accommodate_waiting_guests(seats, guests):\n\n curr_empty_seats = current_empty_seat_capacity(seats)\n empty_seat_list = find_all_available_seats(seats)\n\n if len(guests) <= curr_empty_seats:\n for index, _ in enumerate(guests):\n seats[empty_seat_list[index]] = guests[index]\n\n return seats", "def _periodically_cleanup_candidates(self):\n while True:\n yield 5 * 60.0\n\n now = time()\n for key, candidate in [(key, candidate) for key, candidate in self._candidates.iteritems() if candidate.is_all_obsolete(now)]:\n if __debug__: dprint(\"removing obsolete candidate \", candidate)\n del self._candidates[key]\n self.wan_address_unvote(candidate)", "def remove_outliers(lst):\n slst = sorted(lst)\n three_iqr = 3 * get_IQR(lst)\n low_boundary = float(np.percentile(lst, 25)) - three_iqr\n high_boundary = float(np.percentile(lst, 75)) + three_iqr\n\n return filter(lambda x: x >= low_boundary and x <= high_boundary, slst)", "def removeBetweenPercentile(requestContext, seriesList, n):\n if n < 50:\n n = 100 - n\n\n transposed = zip(*seriesList)\n\n lowPercentiles = [_getPercentile(col, 100-n) for col in transposed]\n highPercentiles = [_getPercentile(col, n) for col in transposed]\n\n return [l for l in seriesList if sum([not lowPercentiles[val_i] < val < highPercentiles[val_i]\n for (val_i, val) in enumerate(l)]) > 0]", "def _collect_waiting_times(self):\n incoming_roads = [\"E2T\", \"N2T\", \"W2T\", \"S2T\"]\n car_list = traci.vehicle.getIDList()\n for car_id in car_list:\n wait_time = traci.vehicle.getAccumulatedWaitingTime(car_id)\n road_id = traci.vehicle.getRoadID(car_id) # get the road id where the car is located\n if road_id in incoming_roads: # consider only the waiting times of cars in incoming roads\n self._waiting_times[car_id] = wait_time\n else: # not in incoming road\n if car_id in self._waiting_times: # a car that was tracked has cleared the intersection\n del self._waiting_times[car_id] \n total_waiting_time = sum(self._waiting_times.values())\n return total_waiting_time", "def attack_monster_long_range(self, attack_monsters, monster_list):\n for col in attack_monsters:\n col.health = 0\n if col.health <= 0:\n monster_list.remove(col)\n return monster_list" ]
[ "0.58844036", "0.588434", "0.58419377", "0.5756025", "0.57415056", "0.57187665", "0.56889147", "0.56837434", "0.5636393", "0.56136954", "0.55720395", "0.5538212", "0.55239934", "0.5519692", "0.5505961", "0.5505401", "0.54820275", "0.54682", "0.5465266", "0.54433537", "0.5381421", "0.53696114", "0.53624743", "0.53375304", "0.5322552", "0.5319461", "0.5312532", "0.53120846", "0.53113", "0.5305125" ]
0.66973364
0
Given the current antigen density and the lists of free cells, the function tries to activate a corresponding number of cells, applying to each of them an activation probability corresponding to its affinity. The lists of remaining free cells as well as an event of germination to be added to the event_list are returned.
def try_activation(Agden, free_naives, free_memory, tnow, RNs): activated = [] fail_naive = [] fail_memory = [] # randomize free cell lists random.shuffle(free_naives) random.shuffle(free_memory) # get number of cells to activate from naive list act_n = np.random.binomial(len(free_naives), cf.p_base * Agden) # get number of memory cells to be activated to enter GC act_m = np.random.binomial(len(free_memory), cf.p_base * Agden) actsum = act_n + act_m # activation of act_n naive cells for i in range(int(act_n)): cell = free_naives.pop() activated.append(cell) # activation of act_m memory cells for GC for i in range(int(act_m)): cell = free_memory.pop() activated.append(cell) # merge lists to new free pool and create event to be returned new_free_naives = free_naives + fail_naive new_free_memory = free_memory + fail_memory if len(activated) > 0: migtime = max(1, cf.tmigration) event = (tnow + migtime, 'Enter', None, activated) else: event = None return new_free_naives, new_free_memory, event, actsum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_activation(self):\r\n\r\n x=0\r\n edges=self.in_edges\r\n for edge in edges:\r\n x+= edge.source.activation*edge.weight\r\n self.activation=1/(1+exp(-x))", "def _update_activation_state(self):\n activation_count = 0\n for task_id in self.task_dict:\n activation_count += (1 if self.task_dict[task_id].is_activated else 0)\n # Result\n self.num_activated_task = activation_count\n self.is_activated = (activation_count > 0)", "def _activate(self, x):\n self._activation_map = self._activation_distance(x, self._weights)", "def cells_enter_GCs(GC_waiting, celllist, tnow, RIs):\n for cell in celllist:\n # get a random GC for entry\n GCpos = RIs.getR()\n # set entrytnow into the waiting area and new position\n cell.GCentrytime = tnow\n cell.AIDstart = tnow\n # add cell to correct waitlist\n GC_waiting[GCpos].append(cell)\n\n return GC_waiting", "def inc_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain += 1\r\n cell.yank()", "def activation_cycle(self) -> None:\n self.update_net()\n self.update_inhibition()\n self.update_membrane_potential()\n self.update_activation()", "def __init_probability_functions(self):\n probability_functions = {}\n for state in self.non_terminal_spaces:\n for action in self.action_space:\n resulting_state = state + self.action_space[action]\n if self.__off_grid_move(resulting_state, state):\n key = (state, -1, state, action)\n else:\n key = (resulting_state, -1, state, action)\n probability_functions[key] = 1\n return probability_functions", "def fc_activation(c_range, num_act, k_a, k_i, ep_a, ep_ap, ep_ai=4.5,\n n_ns=4.6E6, n_sites=int(2)):\n # Compute the MWC probability.\n mwc_term = p_act(c_range, k_a, k_i, n_sites=n_sites, ep_ai=ep_ai)\n\n # Compute and return the fold-change\n numer = 1 + mwc_term * (num_act / n_ns) * np.exp(-(ep_a + ep_ap))\n denom = 1 + mwc_term * (num_act / n_ns) * np.exp(-ep_a)\n return numer / denom", "def _simulate_until_stable(\n grid: List[List[str]],\n count_occupied: Callable[[List[List[str]], int, int], int],\n min_occupied_to_free=4) -> int:\n while True:\n next_grid = _simulate(grid, count_occupied, min_occupied_to_free)\n if _are_equal(next_grid, grid):\n return _count_occupied_seats(grid)\n grid = next_grid", "def calc_attention(self, encoder_hidden_states):\n\n params = self.dec_params\n if len(encoder_hidden_states.shape) == 3:\n # Squeeze the first dimension\n encoder_hidden_states = np.squeeze(encoder_hidden_states, axis=0)\n\n # T x Attn_vec_size\n attn_enc_term = np.matmul(encoder_hidden_states, params.attn_enc_w)\n\n def attention(dec_state):\n attn_dec_term = (np.matmul(dec_state, params.attn_dec_w) +\n params.attn_dec_b) # T x A\n attn_sum = np.tanh(attn_enc_term + attn_dec_term) # T x A\n attn_logits = np.squeeze(np.matmul(attn_sum, params.attn_v)) # T\n attn_probs = softmax(attn_logits)\n\n context_vec = np.matmul(attn_probs, encoder_hidden_states)\n # The attention probabilities are necessary for coverage penalty calculation\n return (context_vec, attn_probs)\n\n return attention", "def update_attentive_A(self):\n\n kg_score_list, row_list, col_list = [], [], []\n # To reduce the GPU memory consumption, we calculate the scores of KG triples according to the type of relation\n for rel_idx in range(1, self.n_relations, 1):\n triple_index = torch.where(self.all_rs == rel_idx)\n kg_score = self.generate_transE_score(\n self.all_hs[triple_index], self.all_ts[triple_index], rel_idx\n )\n row_list.append(self.all_hs[triple_index])\n 
col_list.append(self.all_ts[triple_index])\n kg_score_list.append(kg_score)\n kg_score = torch.cat(kg_score_list, dim=0)\n row = torch.cat(row_list, dim=0)\n col = torch.cat(col_list, dim=0)\n indices = torch.cat([row, col], dim=0).view(2, -1)\n # Current PyTorch version does not support softmax on SparseCUDA, temporarily move to CPU to calculate softmax\n A_in = torch.sparse.FloatTensor(indices, kg_score, self.matrix_size).cpu()\n A_in = torch.sparse.softmax(A_in, dim=1).to(self.device)\n self.A_in = A_in", "def activate(self, x):\n self._activate(x)\n return self._activation_map", "def transition_function(grid, neighbourstates, neighbourcounts, decay_grid,\n water_decay_grid):\n\n global water_counter\n global ignition_grid\n neighbourstates = np.array(neighbourstates)\n init_grid = initial_grid.astype(int)\n ig_grid = np.array(ignition_grid)\n windspeed_ignition_modifiers = wind_speed_rvalue(\"NE\", 10)\n new_ig_grid = []\n for i, row in enumerate(grid):\n new_ig_grid.append([\n ignite(cell, neighbourstates[:, i, j],\n windspeed_ignition_modifiers) for j, cell in enumerate(row)\n ])\n new_ig_grid = np.array(new_ig_grid)\n started_to_burn = []\n for i, row in enumerate(grid):\n started_to_burn.append([\n started_burning(cell, ig_grid[i, j], new_ig_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[started_to_burn] = START_BURN\n ig_grid = np.add(new_ig_grid, ig_grid)\n full_burn = []\n for i, row in enumerate(grid):\n full_burn.append([\n fully_burning(cell, ig_grid[i, j], decay_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[full_burn] = BURNING\n end_burning = []\n for i, row in enumerate(grid):\n end_burning.append([\n ending_burn(cell, decay_grid[i, j], decay_values[int(\n initial_grid[i, j])]) for j, cell in enumerate(row)\n ])\n grid[end_burning] = END_BURN\n decay_grid[(grid == BURNING) | (grid == END_BURN)] -= 1\n burnt_out = (decay_grid == 0) # find those which have decayed to 0\n grid[(decay_grid == 0\n )] = BURNT #set all that have decayed to zero to BURNT(7)\n water_counter += 1\n\n if (water_counter == 100):\n grid[120:160, 80:120] = initial_grid[120:160, 80:120]\n water_decay_grid[(grid != LAKE)] -= 1 # take one off their decay value\n grid[(water_decay_grid == 0)] = BURNT # switch their state to 5\n ignition_grid = ig_grid\n return grid", "def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for ndx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= ndx < len(self.memory)\n self.iter_sum[ndx] = priority ** self.alpha\n self.iter_min[ndx] = priority ** self.alpha\n\n self.max_p = max(self.max_p, priority)", "def _cal_energies(self):\n max_i, max_j, max_k = self._max_grid_indices\n\n corr_func = self._cal_corr_func(\"occupancy\")\n self._free_of_clash = (corr_func < 0.001)\n self._free_of_clash = self._free_of_clash[0:max_i, 0:max_j, 0:max_k] # exclude positions where ligand crosses border\n \n self._meaningful_energies = np.zeros(self._grid[\"counts\"], dtype=float)\n if np.any(self._free_of_clash):\n grid_names = [name for name in self._grid_func_names if name != \"occupancy\"]\n for name in grid_names:\n self._meaningful_energies += self._cal_corr_func(name) \n \n self._meaningful_energies = self._meaningful_energies[0:max_i, 0:max_j, 0:max_k] # exclude positions where ligand crosses border\n \n self._meaningful_energies = self._meaningful_energies[self._free_of_clash] # exclude positions where ligand is in clash with receptor, become 1D array\n self._number_of_meaningful_energies = 
self._meaningful_energies.shape[0]\n \n return None", "def get_transition(self, row, col, action, tot_row, tot_col):\n\n '''\n Expand the grid of the environment to handle when the \n agent decides to move in the direction of a wall \n '''\n state_probabilities = np.zeros((int(np.sqrt(self.env.observation_space.n)) + 2, int(np.sqrt(self.env.observation_space.n)) + 2), dtype=float)\n\n if action == 'UP':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.33 #LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.0 #DOWN\n elif action == 'LEFT':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.33 #LEFT\n state_probabilities[row, col + 1] = 0.0 # RIGHT\n state_probabilities[row + 1, col] = 0.33 #DOWN\n elif action == 'RIGHT':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.0 #LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.33 #DOWN\n elif action == 'DOWN':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.0 # UP\n state_probabilities[row, col - 1] = 0.33 # LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.33 # DOWN\n\n for row in range (0, tot_row+1):\n if state_probabilities[row, 0] != 0:\n state_probabilities[row, 1] += state_probabilities[row, 0]\n elif state_probabilities[row, -1] != 0:\n state_probabilities[row, -2] += state_probabilities[row, -1]\n\n for col in range (0, tot_col+1):\n if state_probabilities[0, col] != 0:\n state_probabilities[1, col] += state_probabilities[0, col]\n elif state_probabilities[-1, col] != 0:\n state_probabilities[-2, col] += state_probabilities[-1, col]\n\n return state_probabilities[1: 1+tot_row, 1:1+tot_col]", "def get_transition(self, row, col, action, tot_row, tot_col):\n\n '''\n Expand the grid of the environment to handle when the \n agent decides to move in the direction of a wall \n '''\n state_probabilities = np.zeros((int(np.sqrt(self.env.observation_space.n)) + 2, int(np.sqrt(self.env.observation_space.n)) + 2), dtype=float)\n\n if action == 'UP':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.33 #LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.0 #DOWN\n elif action == 'LEFT':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.33 #LEFT\n state_probabilities[row, col + 1] = 0.0 # RIGHT\n state_probabilities[row + 1, col] = 0.33 #DOWN\n elif action == 'RIGHT':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.0 #LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.33 #DOWN\n elif action == 'DOWN':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.0 # UP\n state_probabilities[row, col - 1] = 0.33 # LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.33 # DOWN\n\n for row in range (0, tot_row+1):\n if state_probabilities[row, 0] != 0:\n state_probabilities[row, 1] += state_probabilities[row, 0]\n elif state_probabilities[row, -1] != 0:\n state_probabilities[row, -2] += state_probabilities[row, -1]\n\n for col in range (0, tot_col+1):\n if state_probabilities[0, col] != 0:\n state_probabilities[1, col] += 
state_probabilities[0, col]\n elif state_probabilities[-1, col] != 0:\n state_probabilities[-2, col] += state_probabilities[-1, col]\n\n return state_probabilities[1: 1+tot_row, 1:1+tot_col]", "def inner_apply(self, inputs, states, cells, mask=None):\n def slice_last(x, no):\n return x[:, no*self.dim: (no+1)*self.dim]\n\n activation = tensor.dot(states, self.W_state) + inputs\n in_gate = self.gate_activation.apply(\n slice_last(activation, 0) + cells * self.W_cell_to_in)\n forget_gate = self.gate_activation.apply(\n slice_last(activation, 1) + cells * self.W_cell_to_forget)\n next_cells = (\n forget_gate * cells +\n in_gate * self.activation.apply(slice_last(activation, 2)))\n out_gate = self.gate_activation.apply(\n slice_last(activation, 3) + next_cells * self.W_cell_to_out)\n next_states = out_gate * self.activation.apply(next_cells)\n\n if mask:\n next_states = (mask[:, None] * next_states +\n (1 - mask[:, None]) * states)\n next_cells = (mask[:, None] * next_cells +\n (1 - mask[:, None]) * cells)\n\n return next_states, next_cells, in_gate, forget_gate, out_gate", "def set_adapted_free_energy(self):\n\t\t\n\t\tactivity_stats = [self.adapted_activity_mu, self.adapted_activity_sigma]\n\t\tadapted_activity = random_matrix([self.Mm], params=activity_stats, \n\t\t\t\t\t\t\t\t\tseed=self.seed_adapted_activity)\n\t\tself.eps = free_energy(self.Ss0, self.Kk1, self.Kk2, adapted_activity)", "def bp_edgeacc(prob, act, is_list_bat=False):\n if isinstance(prob,tuple):\n prob = prob[-1]\n act_hot=(torch.zeros(act.shape[0],3,*act.shape[1:]))#for one hot encoding, 3 channels and then reduce to 2 channels for loss comp\n act_hot = act_hot.to(act.device)\n act_m = act_hot.scatter(1, act.unsqueeze(dim=1), 1)\n dice_scr = dice_coefficient(prob, act_m[:,:,...], is_list_bat, channelcnt=3, nosft=False)\n\n #TODO try toinclude boundary acc and display in save_img\n # predb = prob[-9]\n # # predb = sft(predb)\n # act_enc = torch.cat(list(map(get_hot_enc, act))) # BX3XhXW shaped img\n # act_origs = act_enc.shape#act original shape\n # act_flat = torch.flatten(act_enc, 0,1)\n # edge_img = torch.stack(list(map(get_edge_img, act_flat)))\n # edge_img = edge_img.view(act_origs)\n\n # edge_img = torch.stack(list(map(get_edge_img, act)))\n\n return dice_scr #+ loss_focal", "def activate_random_cell(self, debug=False):\n\n\t\tcandidate = self.get_random_cell()\n\t\thas = candidate.has_Product()\n\t\tif has != None:\n\t\t\tcandidate.set_active_rule(candidate.get_random_rule_of_type(has))\n\t\t\tcandidate.chain_step(debug)\n\t\telse:\n\t\t\tcandidate.set_active_rule(candidate.get_random_rule())\n\t\t\tcandidate.chain_step(debug)", "def objective_distribution():\n # calculate the needed number of cells or take max value (above which\n # higher diversity should not have practic effects)\n volume = min(cf.naive_pool*len(cf.tinf), 10**5)\n # get bins in the required energy range, width depending on nkey\n if cf.nkey == 1:\n # for nkey = 1, a lot of small bins may not be occupied, thus choose\n # larger bins\n bin_size_goal = 0.1\n bin_number = max(np.round((cf.upperlim-cf.thr)/bin_size_goal), 1)\n bin_edges = np.linspace(cf.thr, cf.upperlim, bin_number+1)\n else:\n bin_size_goal = 0.025\n bin_number = max(np.round((cf.upperlim-cf.thr)/bin_size_goal), 1)\n bin_edges = np.linspace(cf.thr, cf.upperlim, bin_number+1)\n # for the midpoint of each bin, get Gaussian distribution value for\n # mean=0.5 and std=0.1\n bin_midpoints = bin_edges[:-1] + (bin_edges[1]-bin_edges[0])/2\n gauss_weights = np.exp(-np.power(bin_midpoints - 
0.5, 2.) /\n (2 * np.power(0.1, 2.)))\n # scale so that the sum over the bins contains the required cell number\n norm1 = np.sum(gauss_weights)\n obj_dist = np.floor((volume / norm1) * gauss_weights)\n # give back the objective distribution and bin_edges\n return bin_edges, obj_dist, volume", "def process_current_time(self):\n if self.new_input:\n self.new_input = False\n\n if self.activation_count == self.mask_init_time:\n cuda.Context.synchronize()\n self.mask.calculate()\n\n if self.tsettle == 0:\n # Special case: behave just like a CFSheet\n cuda.Context.synchronize()\n self.activate()\n self.learn()\n\n elif self.activation_count == self.tsettle:\n # Once we have been activated the required number of times\n # (determined by tsettle), reset various counters, learn\n # if appropriate, and avoid further activation until an\n # external event arrives.\n for f in self.end_of_iteration: f()\n\n self.activation_count = 0\n self.new_iteration = True # used by input_event when it is called\n if (self.plastic and not self.continuous_learning):\n self.learn()\n else:\n cuda.Context.synchronize()\n self.activate()\n self.activation_count += 1\n if (self.plastic and self.continuous_learning):\n self.learn()", "def _activation(self,components,activation):\r\n \r\n if activation == \"ReLU\":\r\n components.append(nn.ReLU())\r\n elif activation == \"Sigmoid\":\r\n components.append(nn.Sigmoid())\r\n else:\r\n raise Exception(\"Invalid activation fn: \"+activation)", "def actuator_centres(N_actuators, rho_aper=RHO_APER, rho_obsc=RHO_OBSC):\n\n x0 = np.linspace(-1., 1., N_actuators, endpoint=True)\n delta = x0[1] - x0[0]\n N_in_D = 2*RHO_APER/delta\n print('%.2f actuators in D' %N_in_D)\n max_freq = N_in_D / 2 # Max spatial frequency we can sense\n xx, yy = np.meshgrid(x0, x0)\n x_f = xx.flatten()\n y_f = yy.flatten()\n\n act = []\n for x_c, y_c in zip(x_f, y_f):\n r = np.sqrt(x_c ** 2 + y_c ** 2)\n if r < 0.97 * rho_aper and r > 1.05 * rho_obsc:\n act.append([x_c, y_c])\n total_act = len(act)\n print('Total Actuators: ', total_act)\n return [act, delta], max_freq", "def next_t(cell_list, current_burning, b_grid, current_fuel, f_grid, h_grid, \n i_threshold, w_direction, burnt_cells):\n for cell in cell_list: \n \n # for a cell that's not yet burning\n if b_grid[cell[0]][cell[1]] is False:\n burn = check_ignition(current_burning, current_fuel, h_grid, \n i_threshold, w_direction, cell[0], cell[1])\n if burn:\n burnt_cells.append(cell)\n b_grid[cell[0]][cell[1]] = True\n \n # for a cell that's already burning\n else: \n if f_grid[cell[0]][cell[1]] > 1:\n f_grid[cell[0]][cell[1]] -= 1\n else:\n f_grid[cell[0]][cell[1]] -= 1\n b_grid[cell[0]][cell[1]] = False", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n def getClosest(locs, pos):\n closest = 100000\n closestPos = ()\n totalDist = 0\n for loc in locs:\n dist = manhattanDistance(pos, loc)\n totalDist += dist\n if dist < closest:\n closest = dist\n closestPos = loc\n return (closest, closestPos, totalDist)\n\n #gamestate stuff\n currPos = currentGameState.getPacmanPosition()\n Food = currentGameState.getFood()\n GhostStates = currentGameState.getGhostStates()\n ScaredTimes = [ghostState.scaredTimer for ghostState in GhostStates]\n Capsules = currentGameState.getCapsules()\n value = currentGameState.getScore()\n\n #find out stuff about the pellets remaining\n foodPellets = Food.asList()\n numPellets = len(foodPellets)\n closestFood, closestFoodPos, totalFoodDist = getClosest(foodPellets, currPos)\n\n #find closest ghost\n 
closestGhostDist = 1000000\n closestGhost = ()\n for ghost in GhostStates:\n distToGhost = manhattanDistance(currPos, ghost.getPosition())\n if distToGhost < closestGhostDist:\n closestGhostDist = distToGhost\n closestGhost = ghost\n\n #find capsules to make ghosts scared, looking at pacman actions I'm not sure if this works or only the \n #scared timer part decides if a capsule gets eaten\n closestCapsuleDist, closestCapsule, totalCapsuleDist = getClosest(Capsules, currPos)\n if currPos in Capsules:\n value += 25\n if closestCapsuleDist == 1:\n value += 1\n\n \n #if ghost is scared prioritize eating them\n if closestGhost.scaredTimer != 0:\n if closestGhostDist > 0:\n value += closestGhost.scaredTimer / closestGhostDist\n if closestGhostDist == 0:\n value += 1000\n\n #avoid getting caught by ghosts\n if closestGhostDist == 1 and closestGhost.scaredTimer == 0:\n value -= 100\n\n #incentivize eating pellets if ghosts aren't scared and avoid getting stuck trying to choose\n if totalFoodDist > 0 and closestGhost.scaredTimer == 0:\n value -= totalFoodDist / numPellets\n value += 1 / closestFood\n\n return value", "def act(self, state):\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.actor_local.eval()\n\n # Get actions for current state, transformed from probabilities\n #state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n with torch.no_grad():\n probs = self.actor_local(state)#.cpu().detach().numpy()\n self.actor_local.train()\n\n # Transform probability into valid action ranges\n act_min, act_max = self.action_limits\n action = (act_max - act_min) * (probs - 0.5) + (act_max + act_min)/2\n return action", "def uf_activate(self, output_reg):\n if len(self.inputs) is 2:\n self.two_activation(output_reg)\n elif len(self.inputs) is 3:\n self.three_activation(output_reg)\n else:\n self.large_activation(output_reg)", "def __put_activations_on_grid(self, activations, grid, pad=1):\n grid_Y, grid_X = grid\n # get first image in batch to make things simpler\n activ = activations[0, :]\n\n # scale to [0, 255.0]\n mean, var = tf.nn.moments(activ,axes=[0, 1])\n activ = (activ - mean) / tf.maximum(var, 1.0/tf.sqrt(tf.cast(tf.size(activ), tf.float32)))\n\n x_min = tf.reduce_min(activ, axis=[0, 1])\n x_max = tf.reduce_max(activ, axis=[0, 1])\n activ = (activ - x_min) / (x_max - x_min)\n\n # greyscale\n activ = tf.expand_dims(activ, 2)\n # pad X and Y\n x1 = tf.pad(activ, tf.constant([[pad, 0], [pad, 0], [0, 0], [0, 0]]))\n\n # X and Y dimensions, w.r.t. padding\n Y = tf.shape(activ)[0] + pad\n X = tf.shape(activ)[1] + pad\n\n # put NumKernels to the 1st dimension\n x2 = tf.transpose(x1, (3, 0, 1, 2))\n # organize grid on Y axis\n x3 = tf.reshape(x2, tf.stack([grid_X, Y * grid_Y, X, 1]))\n\n # switch X and Y axes\n x4 = tf.transpose(x3, (0, 2, 1, 3))\n # organize grid on X axis\n x5 = tf.reshape(x4, tf.stack([1, X * grid_X, Y * grid_Y, 1]))\n\n # back to normal order (not combining with the next step for clarity)\n x6 = tf.transpose(x5, (2, 1, 3, 0))\n\n # to tf.image_summary order [batch_size, height, width, channels],\n # where in this case batch_size == 1\n x7 = tf.transpose(x6, (3, 0, 1, 2))\n\n # return x8\n return x7" ]
[ "0.5888912", "0.5608717", "0.5381771", "0.5291022", "0.5285021", "0.52689457", "0.5249405", "0.52287054", "0.51110786", "0.5111017", "0.510888", "0.5092028", "0.5085741", "0.5020816", "0.5018179", "0.4999845", "0.4999845", "0.49943686", "0.49876347", "0.49850073", "0.49526134", "0.48909375", "0.48731458", "0.48539147", "0.48506492", "0.48488775", "0.48439196", "0.48258653", "0.48208722", "0.48110127" ]
0.6911425
0
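The try_activation record above hands its activated cells back inside a delayed (time, kind, position, cells) tuple instead of acting on them immediately, so the caller needs some queue to hold the event and dispatch it when its time comes. The sketch below shows one minimal way such tuples could be queued and drained; the heap-based queue, the tie-breaking counter and the integer stand-in cells are assumptions for illustration and are not part of the original simulation code.

import heapq
import itertools

# Hypothetical queue for event tuples shaped like the 'Enter' event above:
# (event_time, kind, gc_position, cells). A monotonically increasing counter
# acts as a tie-breaker so heapq never has to compare the cell payloads.
_counter = itertools.count()
event_queue = []

def schedule(event):
    etime, kind, pos, cells = event
    heapq.heappush(event_queue, (etime, next(_counter), kind, pos, cells))

def dispatch_until(tnow):
    """Pop and return every queued event that is due at or before tnow."""
    due = []
    while event_queue and event_queue[0][0] <= tnow:
        etime, _, kind, pos, cells = heapq.heappop(event_queue)
        due.append((etime, kind, pos, cells))
    return due

# Usage with integer stand-ins for cell objects:
schedule((24, 'Enter', None, [1, 2, 3]))   # e.g. the event returned above
schedule((6, 'Divide', 0, [4, 5]))
print(dispatch_until(10))   # -> [(6, 'Divide', 0, [4, 5])]
print(dispatch_until(30))   # -> [(24, 'Enter', None, [1, 2, 3])]

Ordering the heap entries by time and an opaque counter only avoids ever comparing cell objects, which would otherwise raise a TypeError as soon as two events share the same timestamp.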
Distributes the cells waiting to enter a GC at this time point randomly among the available GCs, supplies a GCentrytime to each of the cells and returns the modified GC waitlist (see the sketch after this record).
def cells_enter_GCs(GC_waiting, celllist, tnow, RIs):
    for cell in celllist:
        # get a random GC for entry
        GCpos = RIs.getR()
        # set entry tnow into the waiting area and new position
        cell.GCentrytime = tnow
        cell.AIDstart = tnow
        # add cell to correct waitlist
        GC_waiting[GCpos].append(cell)

    return GC_waiting
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def long_waiters_die(celllist, tnow):\n survivors = []\n for sublist in celllist:\n newsub = []\n for cell in sublist:\n if tnow - cell.GCentrytime <= cf.tlifeGC:\n newsub.append(cell)\n survivors.append(newsub)\n return survivors", "def select_best_waiters(LFnum, cellSK, GCpos, tnow, AgEpitope, mut_list, RNs):\n # determine the indices of cells to be chosen\n selinds = Boltzchoice(LFnum, [cell.affinity for cell in cellSK], RNs)\n\n # put selected cells on one list, rest on another\n select = [cellSK[i] for i in range(len(cellSK)) if i in selinds]\n rest = [cellSK[i] for i in range(len(cellSK)) if i not in selinds]\n\n # divide the selected cells once to have survivors of first division round\n # only, then choose fate for surviving daughters: another division or\n # differeniation. since we are dividing all cells on the list here and they\n # are not added to the waitlist again, pass empty waitlist.\n selected_daughters, mut_list = cell_division([], select, AgEpitope, tnow,\n mut_list, RNs)\n\n # for these viable daughters, decide how many to divide again and how many\n # to differentiate according to the recycle frequency\n div = np.random.binomial(len(selected_daughters), cf.recycle)\n diff = len(selected_daughters) - div\n # mix daughters (twice, don't trust this function so much)\n random.shuffle(selected_daughters)\n random.shuffle(selected_daughters)\n # make events if count > 0\n new_events = []\n if div > 0:\n event_div = (tnow + cf.thelp + 2*cf.tdiv, 'Divide', GCpos,\n selected_daughters[:div])\n new_events.append(event_div)\n if diff > 0:\n # get number of cells that will become memory cells, ignore rest (PCs)\n memdiff = np.random.binomial(diff, (1 - cf.PCexport))\n event_diff = (tnow + cf.thelp + cf.tdiv + cf.tdiff, 'Differentiate',\n GCpos, selected_daughters[div:div + memdiff])\n new_events.append(event_diff)\n\n return rest, new_events, mut_list", "def create_random_time_interval_gaps(days, gaps_as_percent, avg_gap_as_percent):\n \n print \"days \",days\n #first find the total number of gap days by calculating it as percent of the total days\n total_size_of_missing_days = round(days * (float(gaps_as_percent)/100))\n print \"total size of missing days \", total_size_of_missing_days\n #now, calculate the average gap size as percent of the total number of gap days\n average_gap_in_days = round(total_size_of_missing_days * (float(avg_gap_as_percent)/100)) \n num_of_time_gaps = round(float(days)/average_gap_in_days)\n\n #number of fills is a space around the gaps, from day 0 to last day\n # there are num_of_gaps + 1\n num_of_time_fills = num_of_time_gaps + 1\n size_of_fill = int(round((float(days)-total_size_of_missing_days)/num_of_time_fills))\n \n print \"num_of_time_gaps \", num_of_time_gaps\n print \"num_of_time_files \", num_of_time_fills\n print \"size_of_fill \", size_of_fill\n\n start_indices = []\n begin = 0\n for i in range(0,int(num_of_time_gaps)):\n begin = randint(begin,int(size_of_fill)-1)\n start_indices.append(begin)\n begin = begin + size_of_fill\n if begin + average_gap_in_days > total_size_of_missing_days:\n break\n\n \n print start_indices \n print \"num of time gaps \", num_of_time_gaps \n print \"num of time fills \", num_of_time_fills \n\n interval_tuples = []\n credit_space = days - total_size_of_missing_days\n start = 0\n end = randint(0,avg_size)\n #algorithm - take the size of present data and, using it as a reminder, create a distribution of days", "def source_cut(env, \r\n number, \r\n counter,\r\n generation,\r\n generation_list_come,\r\n 
generation_list_wait,\r\n generation_list_begin,\r\n generation_list_finish,\r\n df_simtime,\r\n generation_list_name,\r\n sum_cut_number_list):\r\n sum_cut_number = 0\r\n for i in range(number):\r\n sample_j = np.random.choice(df_caltocut_distr['time'])\r\n sum_cut_number += sample_j\r\n for j in range(sample_j):\r\n if j == 0:\r\n if i == 0:\r\n t = generation_list_come[i]#到达时间服从指数分布,此处的t为间隔时间\r\n else:\r\n t = generation_list_come[i] - generation_list_come[i-1]\r\n else:\r\n t = 0\r\n \r\n yield env.timeout(t)\r\n serve_time = np.random.choice(df_simtime['sim_time'])#得到模拟数据\r\n # print(serve_time)\r\n c = document(env, \r\n 'Doc%02d_%02d' %(i,j), \r\n generation,\r\n counter, \r\n time_in_fac,\r\n generation_list_begin,\r\n generation_list_wait,\r\n generation_list_finish,\r\n serve_time,\r\n generation_list_name)\r\n env.process(c)\r\n sum_cut_number_list.append(sum_cut_number)", "def __create_buckets(self, sim, dat_in, dat_out, dat_out_raw,\n dat_out_oracle, sequential):\n print(\"Creating arrival time buckets...\")\n fets = dat_in.dtype.names\n assert \"arrival time us\" in fets, f\"Missing \\\"arrival time us\\\": {fets}\"\n arr_times = dat_in[\"arrival time us\"]\n\n # 100x the min RTT (as determined by the simulation parameters).\n dur_us = 100 * 2 * (sim.edge_delays[0] * 2 + sim.btl_delay_us)\n # Determine the safe starting index. Do not pick indices\n # between 0 and start_idx to make sure that all windows ending\n # on the chosen index fit within the simulation.\n first_arr_time = None\n start_idx = None\n for idx, arr_time in enumerate(arr_times):\n if first_arr_time is None:\n first_arr_time = arr_time\n continue\n if arr_time - first_arr_time >= dur_us:\n start_idx = idx\n break\n num_pkts = dat_in.shape[0]\n assert start_idx is not None and 0 <= start_idx < num_pkts, \\\n f\"Invalid start index: {start_idx}\"\n\n if sequential:\n # Select all valid windows, in order.\n num_wins = num_pkts - start_idx\n pkt_idxs = list(range(start_idx, num_pkts))\n else:\n # The number of windows is the number of times the window\n # durations fits within the simulation.\n num_wins = 10 * math.floor(sim.dur_s * 1e6 / dur_us)\n # Select random intervals from this simulation to create the\n # new input data. win_idx is the index into\n # dat_in_new. pkt_idx is the index of the last packet in this\n # window.\n pkt_idxs = random.choices(range(start_idx, num_pkts), k=num_wins)\n # Records the number of packets that arrived during each\n # interval. This is a structured numpy array where each column\n # is named based on its bucket index. We add extra columns for\n # the non--arrival time features.\n dat_in_new = np.zeros(\n (num_wins,),\n dtype=([(f\"bucket_{bkt}\", \"float\") for bkt in range(self.win)] +\n [col for col in dat_in.dtype.descr\n if col[0] != \"arrival time us\" and col[0] != \"\"]))\n\n for win_idx, pkt_idx in enumerate(pkt_idxs):\n # Find the first packet in this window (whose index will\n # be win_start_idx).\n cur_arr_time = arr_times[pkt_idx]\n win_start_idx = None\n # Move backwards from the last packet in the window until\n # we find a packet whose arrival time is more that dur_us\n # in the past.\n for arr_time_idx in range(pkt_idx, -1, -1):\n if cur_arr_time - arr_times[arr_time_idx] > dur_us:\n # This packet is the first packet that is too far\n # in the past, so we will start the window on the\n # next packet.\n win_start_idx = arr_time_idx + 1\n break\n assert (\n win_start_idx is not None and 0 <= win_start_idx <= pkt_idx), \\\n (\"Problem finding beginning of window! 
Are there insufficient \"\n \"packets?\")\n self.__bucketize(\n dat_in, win_start_idx, pkt_idx, dat_in_new, win_idx, dur_us,\n self.win)\n\n # Verify that we selected at least as many windows as we intended to.\n num_selected_wins = len(dat_in_new)\n assert num_selected_wins >= num_wins, \\\n f\"Insufficient windows: {num_selected_wins} < {num_wins}\"\n\n return (\n dat_in_new,\n # As an output feature, select only the final ground truth\n # value. I.e., the final ground truth value for this window\n # becomes the ground truth for the entire window.\n np.take(dat_out, pkt_idxs),\n np.take(dat_out_raw, pkt_idxs),\n np.take(dat_out_oracle, pkt_idxs),\n # The buckets all share a scaling group. Each other\n # feature is part of its own group.\n [0] * self.win +\n list(range(1, len(dat_in_new.dtype.names) - self.win + 1)))", "def new_lists():\n free_naives, free_memory = [], []\n GC_waiting = [[] for gc in range(cf.nGCs)]\n return free_naives, free_memory, GC_waiting", "def distribute_waterbag(self):\n # Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n while ptclsMade < self.npart:\n ranU = 0.0\n while ranU <= 0:\n ranU = random.random()\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n trialH = np.sqrt(ranU)\n newH = self.emit*trialH\n y0 = np.sqrt(newH)\n #self.emittance = newH\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n #xMax = yMax\n\n trialValue = 1e10\n while trialValue >= newH:\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n\n initialValue = trialValue\n if initialValue < newH:\n pMag = np.sqrt(2*(newH - initialValue))\n pDir = 2*np.pi* random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n else:\n print(\"Initial value generated exceeds limiting H. 
Sampling new value.\")\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def gc_blocks(seq, block_size):\n\n # Make all capital\n seq = seq.upper()\n iterations = len(seq) // block_size\n\n # Iterate through finding the GC content\n gc = []\n for i in range(iterations):\n block = seq[i*block_size:(i+1)*block_size]\n gc.append((block.count('G') + block.count('C')) / block_size)\n return tuple(gc)", "def elapseTime(self, gameState):\n newParticleList = []\n # Pretend each particle is a ghost, and set its position semi-randomly based on how\n # likely the ghost is to move to that position\n for particle in self.particles:\n newPosDist = self.getPositionDistribution(gameState, particle)\n newParticleList.append(util.sample(newPosDist))\n self.particles = newParticleList", "def _generate_solution_w_processing_time_criteria(self, lpt):\n \n operation_list = []\n last_operation_scheduled_on_machine = [None] * self.jssp_instance_data.total_number_of_machines\n available_heap = _JobOperationHeap(self.jssp_instance_data, max_heap=lpt)\n\n while 0 < len(available_heap):\n get_unstuck = 0\n rand_operation = available_heap.pop()\n rand_job_id = rand_operation.get_job_id()\n rand_machine = np.random.choice(rand_operation.get_required_machines())\n tmp_operation_list = []\n \n if isinstance(self.jssp_instance_data, Data_Flexible_Job_Shop):\n while last_operation_scheduled_on_machine[rand_machine] is not None \\\n and last_operation_scheduled_on_machine[rand_machine].get_job_id() == rand_job_id \\\n and last_operation_scheduled_on_machine[rand_machine].get_sequence() + 1 < rand_operation.get_sequence():\n\n tmp_operation_list.append(rand_operation)\n\n rand_operation = available_heap.pop()\n rand_job_id = rand_operation.get_job_id()\n rand_machine = np.random.choice(rand_operation.get_required_machines())\n get_unstuck += 1\n\n if get_unstuck > 50:\n return self.get_solution()\n\n for operation in tmp_operation_list:\n available_heap.push(operation)\n\n if len(available_heap.dict[rand_job_id]) == 0:\n if rand_operation.get_sequence() == self.jssp_instance_data.get_job(rand_job_id).get_max_sequence():\n del available_heap.dict[rand_job_id]\n else:\n for t in self.jssp_instance_data.get_job(rand_job_id).get_operations():\n if t.get_sequence() == rand_operation.get_sequence() + 1:\n available_heap.push(t)\n\n last_operation_scheduled_on_machine[rand_machine] = rand_operation\n operation_list.append([rand_job_id, rand_operation.get_operation_id(), rand_operation.get_sequence(), rand_machine])\n\n return Solution(self.jssp_instance_data, np.array(operation_list, dtype=np.intc))", "def fill(self):\n for _ in range(Pyro4.config.THREADPOOL_MINTHREADS):\n if not self.attemptSpawn():\n break", "def init_rnd(self):\n\n # query max number of threads\n gennum = apache.AP_MPMQ_MAX_SPARE_THREADS\n # make generators\n # this bit is from Python lib reference\n g = random.Random(time.time())\n result = [g]\n for i in range(gennum - 1):\n laststate = g.getstate()\n g = random.Random()\n g.setstate(laststate)\n g.jumpahead(1000000)\n result.append(g)\n return result", "def simulate_generation(chromosomes, gene_pool, environment, seq_to_fitness):\n # 1. calculate fitness value of each chromosome.\n pool = multiprocessing.Pool()\n\n for chromosome in chromosomes:\n pool.apply_async(calculate_fitness_value, args=(\n chromosome, seq_to_fitness, str(chromosome), environment))\n\n pool.close()\n pool.join()\n\n # 2. 
sort the chromosomes by its fitness value and reverse the list,\n # because the chromosome with the lowest fitness value is the best.\n chromosomes.sort(key=lambda c: seq_to_fitness[str(c)])\n chromosomes = chromosomes[::-1]\n\n # 3. best 10% of chromosomes survive without change.\n num_best = len(chromosomes) // 10\n fittest_chromosome = chromosomes.pop()\n best_chromosomes = [fittest_chromosome]\n for i in range(num_best - 1):\n best_chromosomes.append(chromosomes.pop())\n\n # 4. crossover: fill the vacancies in the population with new\n # chromosomes. The genes of the new chromosomes are mixtures of the\n # genes of two randomly chosen strong chromosomes.\n new_chromosomes = []\n num_of_new = DEFAULT_POPULATION_SIZE - len(best_chromosomes)\n half_index = len(fittest_chromosome) // 2\n\n while len(new_chromosomes) < num_of_new:\n c1 = random.choice(best_chromosomes)\n c2 = random.choice(best_chromosomes)\n new_chromosomes.append(c1[:half_index] + c2[half_index:])\n if len(new_chromosomes) < num_of_new:\n new_chromosomes.append(c1[half_index:] + c2[:half_index])\n if len(new_chromosomes) < num_of_new:\n new_chromosomes.append(c2[:half_index] + c1[half_index:])\n if len(new_chromosomes) < num_of_new:\n new_chromosomes.append(c2[half_index:] + c1[:half_index])\n\n # 5. mutation: Perform mutations on the new chromosomes.\n # the mutation probability for the lower half is 10 percent.\n new_chromosomes = mutate(new_chromosomes, gene_pool, 10, seq_to_fitness)\n\n # 6. Rejoin all chromosomes.\n chromosomes = best_chromosomes + new_chromosomes\n\n return chromosomes, fittest_chromosome", "def fitness_proportional_selection(self) -> List[Character]:\n print(' - selection')\n st = time.time()\n\n # open pool the the amount of cpu cores\n pool = mp.Pool(mp.cpu_count())\n\n # create a character at each position of the characters list\n new_list = pool.map(create_character, [i for i in self.characters])\n\n # close pool and release the cores\n pool.close()\n\n self.characters = new_list\n self.get_diversity()\n self.calc_sum_fitness()\n self.calc_average_fitness()\n self.get_best_fitness()\n self.get_worst_fitness()\n\n # create the wheel as dict with the selection chance and the character\n wheel: Dict[float, Character] = {}\n\n # the new generation\n new_generation: List[Character] = []\n fit_c_generation: float = 0\n new_wheel = {}\n \"\"\"get the chance of all characters to be selected\n \n \"\"\"\n for c in self.characters:\n p_chance = c.fitness / self.sum_fitness\n chance = p_chance * self.size\n s = str(chance)\n s = s.split('.')\n r = int(s[0])\n f_c = '0.' 
+ s[1]\n f_c = float(f_c)\n fit_c_generation += f_c\n if r <= 0:\n wheel[f_c] = c\n while r > 0:\n new_character = copy.deepcopy(c)\n new_generation.append(new_character)\n r -= 1\n\n for k, v in wheel.items():\n new_key = (k / fit_c_generation) * self.size\n new_wheel[new_key] = v\n\n while len(new_generation) < self.size:\n for k in sorted(new_wheel, reverse=True):\n chance = random.uniform(0, fit_c_generation)\n if chance <= k:\n new_character = copy.deepcopy(new_wheel[k])\n new_generation.append(new_character)\n if len(new_generation) >= self.size:\n break\n continue\n e = time.time()\n print(\" - time: \", e - st)\n\n return new_generation", "def free_slot(self, current_time):\r\n self.free_slots.append(current_time)\r\n get_task_events = self.maybe_get_task(current_time)\r\n if len(get_task_events) > 0:\r\n return get_task_events\r\n\r\n if WORK_STEALING:\r\n # Choose a random scheduler.\r\n scheduler = random.randint(0, NUM_SCHEDULERS - 1)\r\n time_slot_freed = self.free_slots.pop(0)\r\n self.idle_ms += current_time - time_slot_freed\r\n new_task_events = self.simulation.get_any_task(self, scheduler, current_time)\r\n assert len(new_task_events) >= 1\r\n return new_task_events\r\n\r\n return []", "def generation(Duration, amount):\n # Generate group sizes, the total group number is \"amount\", the number of people in each group is between 1 and 6\n size = np.random.randint(1, 7, amount)\n # Generate vip situation, based on the probability of 8%\n vip = []\n for i in range(amount):\n num = np.random.randint(0, 101, 1)\n if (num >= 0) & (num <= 8):\n vip.append(True)\n else:\n vip.append(False)\n # Generate the registration time for each group\n timestamp_list = mod_pert_random(0, Duration // 2, Duration, samples=amount).astype(int)\n timestamp_list = list(timestamp_list)\n\n counter = 0\n queue_2 = Queue()\n queue_4 = Queue()\n queue_6 = Queue()\n\n table_2, table_4, table_6 = tablesSetting(6, 4, 2) # Initializing tables\n\n total_timeR_2 = [] # For calculating total average waiting time\n nextGroup_endTime_2 = {} # {No. 
of table: the ending time of the table}\n\n total_timeR_4 = []\n nextGroup_endTime_4 = {}\n total_timeR_6 = []\n nextGroup_endTime_6 = {}\n\n groupNumb = 0 # all group have their unique ID\n\n for i in range(Duration):\n while i in timestamp_list:\n if size[counter] == 1 or size[counter] == 2:\n queue_2.add_queue(Group(i, 2, vip[counter], groupNumb))\n counter += 1\n groupNumb += 1\n elif size[counter] == 3 or size[counter] == 4:\n queue_4.add_queue(Group(i, 4, vip[counter], groupNumb))\n counter += 1\n groupNumb += 1\n elif size[counter] == 5 or size[counter] == 6:\n queue_6.add_queue(Group(i, 6, vip[counter], groupNumb))\n counter += 1\n groupNumb += 1\n timestamp_list.remove(i) # Deal with the situation that several groups arrive at the same time point\n\n # Run the simulation\n simulation(i, table_2, Duration, queue_2, total_timeR_2, nextGroup_endTime_2)\n simulation(i, table_4, Duration, queue_4, total_timeR_4, nextGroup_endTime_4)\n simulation(i, table_6, Duration, queue_6, total_timeR_6, nextGroup_endTime_6)\n\n # Summary\n if i == Duration-1:\n print(\"Total groups served (groups who finished their meal or on the table currently):\",\n len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))\n avg=(sum(total_timeR_2)+sum(total_timeR_4)+sum(total_timeR_6))/(len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))\n print('Average waiting time for groups served: {0:.2f}'.format(avg), \"minute(s)\")", "def get_per_node_queued_times(self, per_node_queued_times):\r\n for task in self.__tasks.values():\r\n if task.node_monitor_address not in per_node_queued_times:\r\n per_node_queued_times[task.node_monitor_address] = []\r\n per_node_queued_times[task.node_monitor_address].append(task.adjusted_completion_time() - self.__arrival_time)", "def clusters_allocate_cells(self):\n for cluster in self.clusters:\n cluster.cells[:] = []\n for cell in self.block_proc:\n wdists = []\n for cluster in self.clusters:\n s = cluster.size\n d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +\n (cell.z-cluster.z)**2 )\n d = numpy.sqrt(d)\n c = self.c\n # TODO: choose a better distance function below\n r = d*(c+(1-c)*numpy.exp(-s/d))\n r = numpy.clip(r,0,r)\n wdists.append(r)\n self.clusters[numpy.argmin(wdists)].cells.append(cell)", "def randomize_schedule(self):\n #Creates a new Schedule Object\n new_schedule = Schedule(len(self.chromo_list),self.config)\n\n #For all of the entries in the hash map\n for classes,index in self.hash_map.items():\n #Get New Random Position\n rand = random.randint(0,len(new_schedule.chromo_list))\n total_duration = 0\n temp_index = rand\n\n #Adds the Class for the whole Duration\n while total_duration < classes.duration\\\n and temp_index < len(new_schedule.chromo_list):\n new_chromo = new_schedule.insert_chromosome(Chromosome(),\\\n temp_index)\n new_schedule.number_chromosomes += 1\n #Enters the new class into the hash map\n if not new_schedule.hash_map.has_key(classes):\n new_schedule.hash_map[classes] = temp_index\n #Assigns the class\n new_chromo._class = classes\n new_schedule.calculate_fitness(new_chromo,temp_index)\n total_duration += 1\n temp_index += 1\n\n return new_schedule", "def allocations(self):\n max_clients = self.clients\n allocations = [None] * max_clients\n for client_index in range(max_clients):\n allocations[client_index] = []\n join_point_id = 0\n # start with an artificial join point to allow master to coordinate that all clients start at the same time\n next_join_point = JoinPoint(join_point_id)\n for client_index in range(max_clients):\n 
allocations[client_index].append(next_join_point)\n join_point_id += 1\n\n for task in self.schedule:\n start_client_index = 0\n for sub_task in task:\n for client_index in range(start_client_index, start_client_index + sub_task.clients):\n allocations[client_index % max_clients].append(sub_task)\n start_client_index += sub_task.clients\n\n # uneven distribution between tasks and clients, e.g. there are 5 (parallel) tasks but only 2 clients. Then, one of them\n # executes three tasks, the other one only two. So we need to fill in a `None` for the second one.\n if start_client_index % max_clients > 0:\n # pin the index range to [0, max_clients). This simplifies the code below.\n start_client_index = start_client_index % max_clients\n for client_index in range(start_client_index, max_clients):\n allocations[client_index].append(None)\n\n # let all clients join after each task, then we go on\n next_join_point = JoinPoint(join_point_id)\n for client_index in range(max_clients):\n allocations[client_index].append(next_join_point)\n join_point_id += 1\n return allocations", "def stress(self, queue, ct):\n result = []\n for i in range(ct//2): # populate with half\n queue.enqueue(i)\n\n while not queue.is_empty(): # take away 2, add 1\n i += 1\n queue.enqueue(i)\n\n result.append(queue.dequeue()) # will eventually drain\n if queue.is_empty():\n return result\n result.append(queue.dequeue())\n return result", "def waiting_times(all_data):\n print('Computing waiting times')\n result = {'p': [], 'alpha': [], 'durations': []}\n for data in all_data:\n N = data['config']['N']\n p = data['config']['p']\n alpha = data['config']['alpha']\n print(f'p = {p}, alpha = {alpha}')\n\n # find dominant strategy at each point in time\n print(' > Finding dominant strategies')\n dom_strats = np.asarray(list(map(lambda e: get_dominant_strategy(e), data['snapshots'])))\n print(f' >> Found {np.unique(dom_strats).size} unique strategies')\n\n if np.unique(dom_strats).size <= 1:\n print(' >> Skipping')\n continue\n\n # detect dominant strategy changes (and durations)\n print(' > Computing durations')\n durations = get_domain_durations(dom_strats)\n durations /= N**2\n print(f' >> Found {durations.size} durations')\n\n # store result\n result['p'].extend([p]*len(durations))\n result['alpha'].extend([alpha]*len(durations))\n result['durations'].extend(durations)\n\n df = pd.DataFrame(result)\n\n # plot w-time distributions\n print(' > Plotting')\n for p in df['p'].unique():\n sub = df[df['p']==p]\n\n plt.figure()\n for alpha, group in sub.groupby(['alpha']):\n sns.distplot(\n group['durations'],\n kde=False, label=rf'$\\alpha={alpha}$')\n\n plt.title(rf'Distribution of waiting times ($p={p}$)')\n plt.xlabel(r'$\\Delta t$')\n plt.ylabel(r'count')\n plt.legend(loc='best')\n\n plt.savefig(f'images/waiting_times_p{p}.pdf')\n\n ## plot wtd dependence on parameters\n plt.figure()\n sns.boxplot(x='alpha', y='durations', hue='p', data=df)\n plt.savefig('images/waiting_times_vs_alpha.pdf')\n plt.close()\n\n return df", "def Wait(self):\n sleep_time = min(self._time_remaining.values())\n time.sleep(sleep_time)\n\n tasks = set()\n for task in self._time_remaining:\n self._time_remaining[task] -= sleep_time\n if self._time_remaining[task] == 0:\n self._time_remaining[task] = self.task_intervals[task]\n tasks.add(task)\n return tasks", "def _get_day_attack_schedule(self):\n planer_args = self.planner_config[\"args\"]\n start_time = datetime.strptime(planer_args[\"min_time\"], \"%H:%M\").time()\n start_date = 
datetime.combine(datetime.today().date(), start_time)\n end_time = datetime.strptime(planer_args[\"max_time\"], \"%H:%M\").time()\n end_date = datetime.combine(datetime.today().date(), end_time)\n\n random.seed()\n attack_schedule = []\n for start, end in self._split_date_range(start_date, end_date, planer_args[\"times\"]):\n attack_schedule.append(random.uniform(start, end))\n\n return attack_schedule", "def generate_space_cells(cell_number, min_cell_distance=1.0, \n x_range=None, y_range=None, z_range=None,\n space_x_prob_distribution=[1.],\n x_hist=1000000\n ):\n space_ranges = [x_range,y_range,z_range]\n \n # check that the given number of cells fits within the span range\n assert check_cells_fit(cell_number, min_cell_distance, space_ranges)\n del space_ranges\n \n # create initial storage arrays \n coords_array = np.zeros([cell_number, 3])\n \n # works only for x and y axis now; for circular cells\n radius=min_cell_distance*0.5 \n x1_raw = generate_possible_coords(radius,x_range,min_cell_distance)\n x2_raw = generate_possible_coords(min_cell_distance,x_range,min_cell_distance)\n y_space_cell = min_cell_distance/2.*np.sqrt(3.) # from pitagoras\n y_raw=generate_possible_coords(radius,y_range,y_space_cell)\n z_raw = generate_possible_coords(radius,z_range,min_cell_distance)\n \n x1 = True\n all_coords = []\n for next_depth in z_raw:\n if x1 == True:\n x1= False\n else:\n x1 = True\n for next_raw in range(len(y_raw)):\n if x1 == True:\n x1 = False\n for next_coord in range(len(x1_raw)):\n all_coords.append([x1_raw[next_coord],y_raw[next_raw],next_depth])\n else:\n for next_coord in range(len(x2_raw)):\n all_coords.append([x2_raw[next_coord],y_raw[next_raw],next_depth])\n x1 = True\n \n # randomly choose the cell coords number which are needed\n from random import choice\n cumsum_layer_syn_prob = np.cumsum(space_x_prob_distribution)\n # normalize\n cumsum_layer_syn_prob = cumsum_layer_syn_prob/np.max(cumsum_layer_syn_prob) # this line was added, might need to be tested for inh neurons\n\n all_x_layers = np.arange(x_range[0], x_range[1]+x_hist, x_hist)-(0.5*x_hist)\n # first and last 'layer' will have half-width\n all_x_layers[0] = x_range[0]\n all_x_layers[-1] = x_range[1]\n assert len(space_x_prob_distribution) == len(all_x_layers)-1, 'there are '+ str(len(space_x_prob_distribution)) + ' values for probability within x-space, allowed: ' +str(len(all_x_layers)-1)\n for next_cell in range(cell_number):\n all_coords_in_arr = np.array(all_coords)\n\n # choose how far in x-range\n x = np.random.rand()\n layer_idx = np.searchsorted(cumsum_layer_syn_prob, x)\n layer_idx = np.where(cumsum_layer_syn_prob == cumsum_layer_syn_prob[layer_idx])[0][0]\n\n '''\n # choose which # here it was always symmetric, let's now change it so the distribution may not be symmetric\n possible = np.where((all_coords_in_arr[:,0] > (x_hist*layer_idx)) & (all_coords_in_arr[:,0] < x_hist*(layer_idx+1)))[0]\n possible_negative = np.where((all_coords_in_arr[:,0] < (-1*x_hist*layer_idx)) & (all_coords_in_arr[:,0] > x_hist*(-1)*(layer_idx+1)))[0]\n\n possible_all = np.hstack([possible_negative, possible])\n\n next_choice = choice(possible_all) # possibly there is not enough space for the parameters given to fit all the cells\n '''\n\n possible = np.where((all_coords_in_arr[:,0] > all_x_layers[layer_idx]) & (all_coords_in_arr[:,0] < all_x_layers[layer_idx+1]))[0]\n next_choice = choice(possible)\n\n #possible = np.setdiff1d(possible, np.array(next_choice))\n #possible.delete(next_choice)\n\n coords_array[next_cell] = 
all_coords[next_choice]\n all_coords.pop(next_choice)\n\n return coords_array", "def schedule_offloading(\n rp_boxes: List[np.ndarray]\n) -> List[np.ndarray]:\n return prioritize_larger_rp(rp_boxes)", "def generation_process(self):\n start_time = rospy.get_time()\n end_time = start_time + self.max_time\n index = 0\n while not rospy.is_shutdown() and not self.shutdown and not self.stop_generation:\n # check time or dirt number (if this is a requirement) for termination criteria\n if self.end_after_time:\n current_time = rospy.get_time()\n if current_time > end_time:\n self.shutdown = True\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has reached the maximum generation time:\\n\\t({current_time} s > {end_time} s).\\n\\tNode will stop generating and publishing dirt.\\n\")\n break\n if self.end_after_number:\n # this will only be important in the beginning if max_dirt_number=0\n # (otherwise the number check after publishing will always trigger first)\n if index >= self.max_dirt_number:\n self.shutdown = True\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has reached the maximum dirt number ({self.max_dirt_number}).\\n\\tNode will stop generating and publishing dirt.\\n\")\n break\n\n # Create an (increasing) index, a random trust value and a random position for the new dirt\n header = Header()\n header.stamp = rospy.Time.now()\n index += 1\n trust_value = random.randint(\n self.min_trust, self.max_trust)\n pose = Pose(position=self.__generate_point_based_on_prob(),\n orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0))\n sleep_time = random.randint(\n self.time_interval_min, self.time_interval_max)\n\n # Combine everything\n dirt = DirtObject(header, index, pose, trust_value)\n\n # Publish the dirt\n self.__publish_dirt(dirt)\n\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tDirt was generated and publised: [ID: %d, position: (%.2f,%.2f), trust: %d]\\n\\tDirt generation will sleep now for %d seconds.\\n\" % (\n dirt.id, dirt.pose.position.x, dirt.pose.position.y, dirt.trust_value, sleep_time))\n\n # check dirt number (if this is a requirement)\n if self.end_after_number:\n if index >= self.max_dirt_number:\n self.shutdown = True\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has reached the maximum dirt number ({self.max_dirt_number}).\\n\\tNode will stop generating and publishing dirt.\\n\")\n break\n\n # Sleep rest of the (random defined) time\n rospy.sleep(sleep_time)\n\n # State some final values after stopping generation\n duration = rospy.get_time() - start_time\n duration_string = \"%.2f\" % duration\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has generated {index} dirt in total over {duration_string} s.\\n\")", "def _GarbageCollectorWorker(self):\n while not self.stop:\n try:\n # Check the last computation time of the shard mapping.\n next_pass_delay = self._GetSecsUntilNextPass()\n if next_pass_delay is None:\n gevent.sleep(1) # Avoid a fast loop on lookup failures.\n continue\n\n # If we passed the next computation time attempt to run the GC.\n if next_pass_delay <= 0:\n with self.engine.Lock(KEY_LOCK_NAME, LOCK_DURATION_SEC):\n if self._GetSecsUntilNextPass() > 0:\n continue\n self._ResetLastPassTime()\n\n model_provider.GetStates().RunGarbageCollector()\n next_pass_delay = self._GetSecsUntilNextPass()\n\n # Sleep the remainder of the gap, with some randomness so that all\n # clients don't wake up at the same time (+- 2%).\n 
gevent.sleep(next_pass_delay + (1 - 0.04 * (random.random() - 0.5)))\n\n except Exception:\n LOG.error('Error in Garbage Collector loop.')\n LOG.error(traceback.format_exc())", "def createTimeBlocks(availableTimesDict):\n\n # get all keys of the dict\n availableTimesList = list(availableTimesDict.keys())\n # store first key\n firstSlot = availableTimesList[0]\n # store first key as start of first time block\n timeSlots = [[firstSlot]]\n\n # loop through all keys\n for i in range(len(availableTimesList) - 1):\n key = availableTimesList[i]\n # if a number(minute) has no successor which is 1 greater example: 719, 950\n # then the current number is the end of a time block and the following number\n # is the new start of a block\n # ignore all other numbers\n if (key + 1) not in availableTimesList:\n timeSlots[-1].append(key)\n timeSlots.append([availableTimesList[i + 1]])\n del availableTimesDict[key]\n\n # the last number left in the dict is the end of the last time block\n timeSlots[-1].append(list(availableTimesDict.keys())[0])\n return timeSlots", "def freeBlackList():\n try:\n while True:\n sleep(FREE_BLACK_LIST)\n t = datetime.now()\n black_list_mutex.acquire()\n black_list_copy = dict(black_list)\n black_list_mutex.release()\n for blackIp in black_list_copy:\n if (t - black_list_copy[blackIp]).total_seconds() > SECONDS_TO_FREE:\n black_list_mutex.acquire()\n del black_list[blackIp]\n black_list_mutex.release()\n\n except:\n print(\"error in free black list ip\")\n finally:\n if black_list_mutex.locked():\n black_list_mutex.release()\n sys.exit()" ]
[ "0.62247455", "0.54630756", "0.5448373", "0.5303373", "0.5191223", "0.51758844", "0.50630546", "0.49670702", "0.48943335", "0.48723188", "0.4871407", "0.4869202", "0.48606297", "0.4835821", "0.48289672", "0.48086908", "0.48080382", "0.47971594", "0.47934476", "0.47799096", "0.47490552", "0.47276804", "0.4727582", "0.47255334", "0.47222614", "0.4714471", "0.47086138", "0.47066298", "0.46909648", "0.4678673" ]
0.7323367
0
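The cells_enter_GCs record above leans on an external index source (RIs.getR()) to pick a germinal centre for every arriving cell. The self-contained sketch below imitates that behaviour with a seeded uniform random-index helper and a dummy cell class that carries only the attributes the record touches; the class names, the three-GC setup and the integer pids are illustrative assumptions rather than part of the original code.

import random

N_GCS = 3  # assumed number of germinal centres (cf.nGCs in the original)

class RandomIndices:
    """Assumed stand-in for the RIs helper: uniform GC indices."""
    def __init__(self, n_gcs, seed=0):
        self._rng = random.Random(seed)
        self._n_gcs = n_gcs

    def getR(self):
        return self._rng.randrange(self._n_gcs)

class Cell:
    """Dummy cell with only the fields that are set during GC entry."""
    def __init__(self, pid):
        self.pid = pid
        self.GCentrytime = None
        self.AIDstart = None

RIs = RandomIndices(N_GCS)
GC_waiting = [[] for _ in range(N_GCS)]
tnow = 5.0

# Inline version of the distribution loop from the record above.
for cell in (Cell(i) for i in range(10)):
    GCpos = RIs.getR()               # pick a GC uniformly at random
    cell.GCentrytime = tnow          # stamp the entry time
    cell.AIDstart = tnow
    GC_waiting[GCpos].append(cell)   # queue the cell at that GC

print([len(waitlist) for waitlist in GC_waiting])   # e.g. [4, 3, 3]

Stamping every cell with the same tnow is what lets a later sweep age cells out of the waitlists by comparing tnow - cell.GCentrytime against a lifetime threshold, which is exactly what the long_waiters_die snippet listed among this record's negatives does.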
Given the current number of available limiting factors and the sorted list of waiting cells, this function picks the LFnum cells to be moved on according to the Boltzmann choice and distributes them between the fates of differentiation and division according to the given recycle frequency. In order to incorporate double cell division after selection, the sequences of selected cells are directly sent through division once to see whether one or two cells survive the first division round. The two daughters of this first division are then distributed to the fates of either dividing again or differentiating and are put on event lists. These events are returned with an event time given by the help time plus the time needed to divide once (the first division discussed above) plus another division time or differentiation time, according to the chosen fate. The waitlist remaining after selection is also returned.
def select_best_waiters(LFnum, cellSK, GCpos, tnow, AgEpitope, mut_list, RNs):
    # determine the indices of cells to be chosen
    selinds = Boltzchoice(LFnum, [cell.affinity for cell in cellSK], RNs)

    # put selected cells on one list, rest on another
    select = [cellSK[i] for i in range(len(cellSK)) if i in selinds]
    rest = [cellSK[i] for i in range(len(cellSK)) if i not in selinds]

    # divide the selected cells once to have survivors of first division round
    # only, then choose fate for surviving daughters: another division or
    # differentiation. since we are dividing all cells on the list here and they
    # are not added to the waitlist again, pass empty waitlist.
    selected_daughters, mut_list = cell_division([], select, AgEpitope, tnow,
                                                 mut_list, RNs)

    # for these viable daughters, decide how many to divide again and how many
    # to differentiate according to the recycle frequency
    div = np.random.binomial(len(selected_daughters), cf.recycle)
    diff = len(selected_daughters) - div
    # mix daughters (twice, don't trust this function so much)
    random.shuffle(selected_daughters)
    random.shuffle(selected_daughters)
    # make events if count > 0
    new_events = []
    if div > 0:
        event_div = (tnow + cf.thelp + 2*cf.tdiv, 'Divide', GCpos,
                     selected_daughters[:div])
        new_events.append(event_div)
    if diff > 0:
        # get number of cells that will become memory cells, ignore rest (PCs)
        memdiff = np.random.binomial(diff, (1 - cf.PCexport))
        event_diff = (tnow + cf.thelp + cf.tdiv + cf.tdiff, 'Differentiate',
                      GCpos, selected_daughters[div:div + memdiff])
        new_events.append(event_diff)

    return rest, new_events, mut_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Boltzchoice(LFnum, energylist, RNs):\n # transform list to energy values in kT according to experimental\n # affinities and the energy window allowed by the threshold\n energylist = cf.y0 + np.array(energylist) * cf.m\n # calculate norm of initial list\n Norm = sum([math.exp(-ener) for ener in energylist])\n # calculate initial probability vector\n probs = np.array([math.exp(-ener) / Norm for ener in energylist])\n # list to catch indices of selected cells\n selected = []\n # cells to be picked: determined by the lesser of #waiters and #LFs\n cellpick = min(len(energylist), LFnum)\n while len(selected) < cellpick:\n bins = np.cumsum(probs)\n ind = np.digitize(RNs.getR(), bins)\n selected.append(ind)\n # now, set the probability of the selected cell to 0 and renormalise\n # the remaining probability vector\n newNorm = Norm - math.exp(-energylist[ind])\n probs[ind] = 0\n probs = probs * Norm / newNorm\n Norm = newNorm\n\n return selected", "def cell_division(waitlist, celllist, AgEpitope, tnow, mut_list, RNs):\n for cell in celllist:\n # get list of 0 to 2 daughters\n dlist, mut_list = divide(cell, AgEpitope, tnow, mut_list, RNs)\n # add daughters to waitlist\n waitlist = waitlist + dlist\n return waitlist, mut_list", "def fletching(n):\n\n print(\"Get ready, fletching will start in 10s\")\n rt.wait(10)\n \n print(\"starting...\")\n \n for i in range((n/150)):\n \n process = (float(i)*150+150)/float(n)*100 #calculate percentage till done\n eta = (16*n/150)-16*i-16 #calculate time till done (assumption:average cycle = 16s)\n\n kb.press(\"1\",0.2)\n rt.wait(1)\n kb.press(\"space\",0.2)\n rt.wait(10)\n #USER update on progress:\n #Process: 0.00% ETA: 00s -- 00min\n\n print(\"PROCESS: \", '%.2f' % process,\"% \", \"ETA: \", int(eta)/3600, \"hour\",int(eta)/60-((int(eta)/3600)*60),\"min\",int(eta)%60,\"sec\")", "def run(brickheight,bricklength,walllength,wallheight,occupied=[],answer=[],globall=[]):\n if bricklength == brickheight:\n for t in range(walllength-bricklength+1):\n for s in range(wallheight-brickheight +1):\n column = t\n row = s\n if test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer)\n if end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n return answer\n else:\n return False\n if bricklength != brickheight:\n for t in range(walllength):\n for s in range(wallheight):\n column = t\n row = s\n\n if test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer) and \\\n test(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer):\n occupied2 = occupied[:]\n answer2 = answer[:]\n\n put(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n globall.append(answer)\n\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied2,answer2)\n if not end(brickheight,bricklength,walllength,wallheight,occupied2,answer2):\n run(brickheight,bricklength,walllength,wallheight,occupied2,answer2,globall)\n else:\n globall.append(answer)\n \n elif test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n 
run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n globall.append(answer)\n \n elif test(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer):\n put(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n globall.append(answer)\n return globall", "def HillClimberD():\n\t\n\t# Prepares the variables\n\tspaceCraftId = main.createObjectsSpaceCraft(\"DE\")\n\tcargoListName = \"SwitchD\"\n\tcargoListIdOld = main.createObjectsCargoList(3)\n\tstartFile = \"records/OptimalRandomList.csv\"\n\tFilledInParcels = []\n\tusedParcels = []\n\tunusedParcels = supportHC.MakeTemporaryCargoList(cargoListIdOld,\"records/resultsFirstHalf.csv\",cargoListName,0,True)\n\treDoFile = \"records/Used.csv\"\n\t# First iteration uses the original division from OptimalRandomList.csv \n\tfor x in range(0,8):\n\t\tsupportHC.MakeTemporaryCargoList(cargoListIdOld,\"records/resultsFirstHalf.csv\",cargoListName,x)\n\t\tcargoListId = main.createObjectsCargoList(cargoListName)\n\t\tsupportHC.generateRandomList(startFile,cargoListId,spaceCraftId)\n\t\tsaveFile = \"records/OptimalFirstRun\"+str(x)+\".csv\"\n\t\tusedParcels, unusedAttempt = supportHC.hillClimber(startFile, \"records/OptimalResultsD.csv\", saveFile, cargoListId, spaceCraftId, [], False, 5, 100000, 20, 15, True, True)\n\t\t\n\t\t# Store the unused Parcels to use later on\n\t\tunusedParcels = unusedParcels + unusedAttempt\n\t\tFilledInParcels.append(usedParcels)\n\tsupportHC.SaveToRetry(reDoFile,FilledInParcels)\n\t\n\t# Try to switch around to try to fit the remaining parcels in anyway\n\tfor x in range(0,8):\n\t\tstartFile = \"records/OptimalFirstRun\"+str(x)+\".csv\"\n\t\tsupportHC.MakeTemporaryCargoList(cargoListIdOld,reDoFile,cargoListName,x,False,unusedParcels)\n\t\tcargoListId = main.createObjectsCargoList(cargoListName)\n\t\tsupportHC.generateRandomList(startFile,cargoListId,spaceCraftId)\n\t\tunusedParcels = supportHC.hillClimber(startFile, \"records/OptimalResultsD.csv\", \"records/OptimalFinalResulsts.csv\", cargoListId, spaceCraftId, [], False, 5, 100000, 20, 15, True, True)[1]\n\n\t# Send 1 more shipment to send the last parcels\n\tstartFile = \"records/OptimalRandomList.csv\"\n\tsupportHC.SaveToRetry(reDoFile,[unusedParcels])\n\tsupportHC.MakeTemporaryCargoList(cargoListIdOld,reDoFile,cargoListName,0,False)\n\tcargoListId = main.createObjectsCargoList(cargoListName)\n\tsupportHC.generateRandomList(startFile,cargoListId,spaceCraftId)\n\tsupportHC.hillClimber(startFile, \"records/OptimalResultsD.csv\", \"records/OptimalFinalResulsts.csv\", cargoListId, spaceCraftId, [], False, 5, 100000, 20, 15, True)", "def data_update(fname_list,lname_list,favorite_number,freq) :\n\ttemp = []\n\tnum = 0\n\tcounter = 1\n\n\t#Taking the employee name and checking if it is not blank.\n\twhile 1 :\n\n\t\tfname = raw_input(\"Enter your first name:\")\n\t\tlname = raw_input(\"Enter your last name:\")\n\t\t\n\t\tif fname == \"\" or lname == \"\" : \n\t\t\tprint \"Enter a valid Name\"\n\t\t\tcontinue\n\n\t\telse :\t\n\t\t\tfname_list.append(fname)\n\t\t\tlname_list.append(lname)\n\t\t\tbreak\t\n\n\t#Taking in the input for the favorite numbers.\n\twhile counter < 7 :\n\t\ttry :\n\n\t\t\t#Slot #1\n\t\t\tif counter==1 :\n\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 1st # (1 thru 69):\"))\n\n\t\t\t#Slot #2\n\t\t\telif counter==2 : 
\n\t\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 2nd # (1 thru 69 excluding \"+ str(temp[0]) + \"):\"))\n\n\t\t\t#Slot #3\n\t\t\telif counter==3 : \n\t\t\t\t\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 3rd # (1 thru 69 excluding \" + str(temp[0]) + \" and \"+ str(temp[1]) + \"):\"))\n\n\t\t\t#Slot #4\n\t\t\telif counter==4 : \n\t\t\t\t\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 4th # (1 thru 69 excluding \" + str(temp[0]) + \", \" + str(temp[1]) + \" and \" + str(temp[2]) + \"):\"))\n\n\t\t\t#Slot #5\n\t\t\telif counter==5 : \n\t\t\t\t\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 5th # (1 thru 69 excluding \" + str(temp[0]) + \", \" + str(temp[1]) + \", \" + str(temp[2]) + \" and \" + str(temp[3]) + \"):\"))\n\n\t\t\t#Powerball slot\n\t\t\telif counter==6 :\n\t\t\t\n\t\t\t\tnum = int(raw_input(\"select Power Ball # (1 thru 26):\"))\n\n\t\t#Non integer input\n\t\texcept ValueError :\t\n\t\t\t\t\n\t\t\t\tprint \"Enter Valid Input\"\n\t\t\t\tcontinue\t\n\n\t\t#Powerball number is out of the specified range.\n\t\tif (counter==6) and (num < 1 or num > 26) :\n\t\t\t\t\n\t\t\t\tprint \"Enter Powerball No. in range\"\n\t\t\t\tcontinue\t\n\n\t\t#One of the first five numbers is either repeated or out of the specified range.\n\t\tif (counter != 6) and ((num < 1 or num > 69) or (num in temp)) :\n\t\t\t\n\t\t\tprint \"Enter Valid Input, Number out of range or duplicate\"\n\t\t\tcontinue\n\n\t\tfreq[counter - 1].append(num)\n\t\tcounter += 1\n\t\ttemp.append(num)\n\t\t\n\t#Adding the current employee's favorite number list to the list for all employees.\n\tfavorite_number.append(temp)\n\treturn", "def simulate_boundary(self,print_every=1000,do_F_bound=True):\n n_t = self.t_span.size\n self.n_t = n_t\n x = self.x0.copy()\n self._triangulate(x)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.x = x.copy()\n self.x_save = np.ones((n_t,int(self.n_c*self.b_extra),2))*np.nan\n self.tri_save = -np.ones((n_t,int(self.tris.shape[0]*self.b_extra),3),dtype=np.int32)\n self.generate_noise_boundary()\n if do_F_bound is True:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x,recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i,:self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours,self.vs)\n self.get_P(self.neighbours,self.vs)\n F = self.get_F(self.neighbours,self.vs)\n # F_bend = get_F_bend(self.n_c, self.CV_matrix, self.n_C, x, self.zeta)\n F_soft = weak_repulsion_boundary(self.Cents,self.a,self.k, self.CV_matrix,self.n_c,self.n_C)\n F_bound = boundary_tension(self.Gamma_bound,self.n_C,self.n_c,self.Cents,self.CV_matrix)\n x += self.dt*(F + F_soft + self.v0*self.noise[i,:x.shape[0]] + F_bound)\n # + F_bend + F_bound\n\n self.x = x\n self.x_save[i,:x.shape[0]] = x\n else:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x, recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i, :self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours, self.vs)\n self.get_P(self.neighbours, self.vs)\n F = self.get_F(self.neighbours, self.vs)\n F_soft = weak_repulsion_boundary(self.Cents, self.a, self.k, self.CV_matrix, self.n_c, self.n_C)\n x += self.dt * (F + F_soft + self.v0 * self.noise[i, :x.shape[0]])\n\n self.x = x\n self.x_save[i, :x.shape[0]] = x\n print(\"Simulation complete\")\n return self.x_save", "def task_divider(first_num, last_num):\r\n\r\n num_amount = last_num - first_num + 1\r\n index = first_num\r\n reminder = 
num_amount % WORK_UNIT_LENGTH\r\n global work_units\r\n global work_units_amount\r\n global work_status\r\n while index - 1 + WORK_UNIT_LENGTH <= last_num - reminder:\r\n work_unit_data = {\r\n \"first_num\": index,\r\n \"last_num\": index - 1 + WORK_UNIT_LENGTH,\r\n }\r\n Db.insert_work_unit(work_unit_data)\r\n work_units_amount = work_units_amount + 1\r\n index = index + WORK_UNIT_LENGTH\r\n\r\n if reminder > 0:\r\n work_unit_data = {\r\n \"first_num\": last_num - reminder + 1,\r\n \"last_num\": last_num\r\n }\r\n Db.insert_work_unit(work_unit_data)\r\n work_units_amount = work_units_amount + 1\r\n work_status = Db.WorkStatusNames.has_work.value\r\n #print(work_units)\r", "def _refine_dwell2_time(limit_type, n_dwells, min_dwell, max_dwell, limit, opt_fun, results):\n\n # This is the configuration for working with a max temperature limit (as opposed to a min temperature limit).\n max_min = 'max'\n min_max = 'min'\n\n if 'min' in limit_type:\n max_min = 'min'\n min_max = 'max'\n\n # dwell2_range defines the possible dwell 2 guesses, first defined in log space\n dwell2_range = np.logspace(1.0e-6, 1, n_dwells, endpoint=True) / n_dwells\n dwell2_range = min_dwell + \\\n (max_dwell - min_dwell) * (dwell2_range - dwell2_range[0]) / (dwell2_range[-1] - dwell2_range[0])\n\n # Run the dwell1_state-dwell2_state schedule using the possible dwell 2 guesses\n output = np.array([opt_fun(t) for t in dwell2_range], dtype=[('duration2', float), ('max', float),\n ('mean', float), ('min', float)])\n\n # Ensure the results are sorted. Although dwell2_range will be sorted, the output may not when two or more dwell\n # times are close, where temperature oscillations from instabilities in the Xija model can cause the results to lose\n # this order.\n #\n # The column that is used to sort the results also depends on the limit type.\n output_sorted = np.sort(output, order=max_min)\n ind = np.searchsorted(output_sorted[max_min], limit)\n\n if ind == 0:\n # np.searchsorted finds the first suitable location by default, so if ind == 0, then the duration must\n # fall at the bounded value. This is not true if ind == -1 (the last value).\n results[max_min + '_temp'] = limit\n results['dwell_2_time'] = output['duration2'][ind]\n results[min_max + '_temp'] = output[min_max][ind]\n results['mean_temp'] = output['mean'][ind]\n results['converged'] = True\n\n else:\n t_bound = (output_sorted['duration2'][ind - 1], output_sorted['duration2'][ind])\n dwell2_range = np.linspace(np.min(t_bound), np.max(t_bound), n_dwells, endpoint=True)\n output = np.array([opt_fun(t) for t in dwell2_range],\n dtype=[('duration2', float), ('max', float), ('mean', float),\n ('min', float)])\n\n # In rare conditions where all 'x' values are very close and 'wobble' a bit, it may not be sorted. If it\n # is not sorted, the quadratic method will result in an error. The linear method is more tolerant of this\n # condition. 
Additionally, the quadratic has a tendency to produce some really weird results even when the\n # data appears sensible.\n f_dwell_2_time = interpolate.interp1d(output[max_min], output['duration2'], kind='linear', assume_sorted=False)\n f_non_limit_temp = interpolate.interp1d(output[max_min], output[min_max], kind='linear', assume_sorted=False)\n f_mean_temp = interpolate.interp1d(output[max_min], output['mean'], kind='linear', assume_sorted=False)\n\n results[max_min + '_temp'] = limit\n results['dwell_2_time'] = f_dwell_2_time(limit).item()\n results['mean_temp'] = f_mean_temp(limit).item()\n results[min_max + '_temp'] = f_non_limit_temp(limit).item()\n\n results['converged'] = True\n\n return results, output", "def grid_visibilities_parallel(self, visibilities,min_attenuation = 1e-10, N = 120):\n\n #Find out the number of frequencies to process per thread\n nfreq = len(self.frequencies)\n numperthread = int(np.ceil(nfreq/self.n_obs))\n offset = 0\n nfreqstart = np.zeros(self.n_obs,dtype=int)\n nfreqend = np.zeros(self.n_obs,dtype=int)\n infreq = np.zeros(self.n_obs,dtype=int)\n for i in range(self.n_obs):\n nfreqstart[i] = offset\n nfreqend[i] = offset + numperthread\n\n if(i==self.n_obs-1):\n infreq[i] = nfreq - offset\n else:\n infreq[i] = numperthread\n\n offset+=numperthread\n\n # Set the last process to the number of frequencies\n nfreqend[-1] = nfreq\n\n processes = []\n\n ugrid = np.linspace(-self.uv_max, self.uv_max, self.n_uv +1 ) # +1 because these are bin edges.\n \n centres = (ugrid[1:] + ugrid[:-1]) / 2\n \n visgrid = np.zeros((self.n_uv, self.n_uv, len(self.frequencies)), dtype=np.complex128)\n\n\n if(os.path.exists(self.datafile[0][:-4]+\".kernel_weights.npy\")):\n kernel_weights = np.load(self.datafile[0][:-4]+\".kernel_weights.npy\")\n else:\n kernel_weights=None\n \n if kernel_weights is None:\n weights = np.zeros((self.n_uv, self.n_uv, len(self.frequencies)))\n\n visgrid_buff_real = []\n visgrid_buff_imag = []\n weights_buff = []\n\n #Lets split this array up into chunks\n for i in range(self.n_obs):\n\n visgrid_buff_real.append(multiprocessing.RawArray(np.sctype2char(visgrid.real),visgrid[:,:,nfreqstart[i]:nfreqend[i]].size))\n visgrid_buff_imag.append(multiprocessing.RawArray(np.sctype2char(visgrid.imag),visgrid[:,:,nfreqstart[i]:nfreqend[i]].size))\n visgrid_tmp_real = np.frombuffer(visgrid_buff_real[i])\n visgrid_tmp_imag = np.frombuffer(visgrid_buff_imag[i])\n visgrid_tmp_real = visgrid[:,:,nfreqstart[i]:nfreqend[i]].real.flatten()\n visgrid_tmp_imag = visgrid[:,:,nfreqstart[i]:nfreqend[i]].imag.flatten()\n\n\n if(kernel_weights is None):\n weights_buff.append(multiprocessing.RawArray(np.sctype2char(weights),weights[:,:,nfreqstart[i]:nfreqend[i]].size))\n weights_tmp = np.frombuffer(weights_buff[i])\n weights_tmp = weights[:,:,nfreqstart[i]:nfreqend[i]]\n else:\n weights_buff.append(None)\n\n processes.append(multiprocessing.Process(target=self._grid_visibilities_buff,args=(self.n_uv,visgrid_buff_real[i],visgrid_buff_imag[i],weights_buff[i], visibilities[:,nfreqstart[i]:nfreqend[i]],self.frequencies[nfreqstart[i]:nfreqend[i]],self.baselines,centres,self._instr_core.sigma(self.frequencies[nfreqstart[i]:nfreqend[i]]),min_attenuation, N) ))\n\n for p in processes:\n p.start()\n\n for p in processes:\n p.join()\n\n for i in range(self.n_obs):\n\n visgrid[:,:,nfreqstart[i]:nfreqend[i]].real = np.frombuffer(visgrid_buff_real[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n visgrid[:,:,nfreqstart[i]:nfreqend[i]].imag = 
np.frombuffer(visgrid_buff_imag[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n\n if(kernel_weights is None):\n weights[:,:,nfreqstart[i]:nfreqend[i]] = np.frombuffer(weights_buff[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n\n if kernel_weights is None:\n kernel_weights = weights\n \n visgrid[kernel_weights!=0] /= kernel_weights[kernel_weights!=0]\n\n return visgrid,kernel_weights", "def driver() :\n\t\n\t#The lists for the first name, last name and the favorite number for all employees.\n\tfname_list = [] \n\tlname_list = []\n\tfavorite_number = []\n\n\t#The list used in calculating the frequency of the numbers at a specific slot. \n\tfreq = [[] for _ in xrange(6)]\n\n\t#The result list containing the Powerball winning number.\n\tres=[]\n\tinput_choice=\"\"\n\n\t# Taking the user inputs until N or n is entered as input.\n\twhile 1 : \n\t\tinput_choice = raw_input(\"Enter employee info? [Y/N] \") \n\n\t\t# if the user inputs lowercase y or n it would still work.\n\t\tif input_choice in ['y','Y']:\n\n\t\t\tdata_update(fname_list,lname_list,favorite_number,freq)\n\n\t\telif input_choice in ['n','N'] :\n\t\t\t\n\t\t\tbreak\n\n\t\telse :\n\t\t\tprint \"Invalid Choice\"\n\t\t\tcontinue\t\t\n\n\tn_employees = len(fname_list)\n\tcounter = 0\n\tprint \"\\n\\n\"\n\n\t#Printing the user names and their favorite numbers to stdout.\n\twhile counter < n_employees :\n\t\tprint fname_list[counter] + \" \" + lname_list[counter] + \" \" + \" \".join(map(str,favorite_number[counter][:-1])) + \" Powerball: \" + str(favorite_number[counter][5])\n\t\tcounter += 1\n\n\tprint \" \\n\\n \"\n\n\t#If No employee info was entered.\n\tif n_employees==0:\n\t\tprint \"No Employee Found\"\n\n\t#Calculating the numbers with max frequency in each slot. If not unique, a random number would be used.\n\telse : \n\t\titr = 0\n\t\twhile itr < 6 :\n\t\t\tcount=Counter(freq[itr])\n\n\t\t\t#There is just one number to choose from in this slot. \n\t\t\tif len(count)==1 :\n\t\t\t\tres.append(count.most_common()[0][0])\n\n\t\t\t#There is no unique number with max frequency.\t\n\t\t\telif count.most_common()[0][1] == count.most_common()[1][1] :\n\t\t\t\tif itr < 5 :\n\t\t\t\t\tres.append(random.randint(1,69))\n\t\t\t\telse :\n\t\t\t\t\tres.append(random.randint(1,26))\n\n\t\t\t#The number with max frequency is unique. \t\n\t\t\telse :\n\t\t\t\tres.append(count.most_common()[0][0])\n\n\t\t\titr += 1\n\n\t\t#Printing out the winning Powerball number.\n\t\tprint \"Powerball winning number:\\n\"\n\t\tprint \" \".join(map(str,res[:-1])) + \" Powerball: \" + str(res[5])\n\t\n\treturn", "def selectBestSchedule(self, remainder):\n # gas boiler? 
no schedules available!\n if self.getTER1() == 0:\n return -1\n\n\n #load_sched = [[0 for x in range(len(self.schedules[0])-1)] for y in range(self.noOfSchedules)]\n abs_sum = [0 for x in range(self.noOfSchedules)]\n max_min_diff = [0 for x in range(self.noOfSchedules)]\n #remainder_average = [0 for x in range(self.noOfSchedules)]\n #NO_worse_slots = [0 for x in range(self.noOfSchedules)] # saves number of timeslots in which the remainder is worse for each schedule\n\n min_diff = 0\n idx_min_diff = -1\n child_load = [0 for x in range(len(self.schedules[0])-1)]\n\n\n #if self.Children: # if not a leave node: use local knowledge about child loads\n # for c in range(len(self.Children)):\n # for t in range(len(child_load)):\n # child_load[t] += self.EConsumptionChildCurves[c][t]\n\n for s in range(self.noOfSchedules):\n\n current_remainder = [0 for x in range(len(remainder))]\n current_remainder_abs = [0 for x in range(len(remainder))]\n\n for t in range(len(remainder)):\n # add schedule load curve to compensation curve\n current_remainder[t] = remainder[t] + self.EConsumptionScheduleCurves[s][t] #- child_load[t]\n\n # as currently chosen schedule is included in remainder, subtract it (if not in first round)\n if self.chosenScheduleIndex != -1:\n current_remainder[t] -= self.EConsumptionChosenSchedule[t]\n\n current_remainder_abs[t] = abs(current_remainder[t])\n #if current_remainder_abs[t] > remainder[t]:\n # NO_worse_slots[s] += 1\n\n\n # accumulated absolute gradients as measure for similarity of curves\n abs_sum[s] = sum(current_remainder_abs)\n max_min_diff[s] = max(current_remainder)- min(current_remainder)\n #remainder_average[s] = sum(current_remainder_abs)/len(current_remainder_abs)\n\n #print 'abs_grad_sum: {0}'.format(abs_grad_sum[s])\n\n # new minimal abs difference?\n if self.OPTcriterion == 'maxmindiff':\n if idx_min_diff == -1 or min_diff - max_min_diff[s] > 0.001 : # min difference is 0.001 Watt to avoid oscillations\n idx_min_diff = s\n min_diff = max_min_diff[s]\n elif self.OPTcriterion == 'absremainder':\n if idx_min_diff == -1 or min_diff - abs_sum[s] > 0.001 : # min difference is 0.001 Watt to avoid oscillations\n idx_min_diff = s\n min_diff = abs_sum[s]\n\n if (idx_min_diff != self.chosenScheduleIndex):\n self.chosenSchedule = copy.deepcopy(self.schedules[idx_min_diff])\n if self.chosenScheduleIndex != -1:\n self.prevChosenScheduleIndex = self.chosenScheduleIndex # remember previously chosen schedule\n self.chosenScheduleIndex = idx_min_diff\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[idx_min_diff])\n #print 'ID {0}: new schedule has index {1}'.format(self.CommID, idx_min_diff)\n return 1\n else:\n if self.chosenScheduleIndex != -1:\n self.prevChosenScheduleIndex = self.chosenScheduleIndex\n #print 'ID {0}: new schedule = old schedule with index {1}'.format(self.CommID, self.chosenScheduleIndex)\n return 0", "def run_partial_vhf(trj, chunk_length, selection1, selection2, n_chunks, water=True,\n step=1, r_range=(0, 1.0), bin_width=0.005, n_bins=None,\n self_correlation=True, periodic=True, opt=True):\n # Calculate intervals between starting points\n starting_frames = np.linspace(0, trj.n_frames-chunk_length, n_chunks, dtype=int)\n vhf_list = list()\n if step != 1:\n frames_in_chunk = int(chunk_length / step)\n else:\n frames_in_chunk = chunk_length\n for idx, start in enumerate(starting_frames):\n end = start + chunk_length\n chunk = trj[start:end:step]\n print(f\"Analyzing frames {start} to {end}...\")\n r, g_r_t = 
compute_partial_van_hove(trj=chunk,\n chunk_length=frames_in_chunk,\n selection1=selection1,\n selection2=selection2,\n r_range=r_range,\n bin_width=bin_width,\n n_bins=n_bins,\n self_correlation=self_correlation,\n periodic=periodic,\n opt=opt,)\n \n vhf_list.append(g_r_t)\n\n vhf_mean = np.mean(vhf_list, axis=0)\n t_save = trj.time[0:chunk_length:step]\n\n return r, t_save, vhf_mean", "def after_crop_gribs(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure 00\": [],\n \"failure 06\": [],\n \"failure 12\": [],\n \"failure 18\": [],\n \"success 00\": [],\n \"success 06\": [],\n \"success 12\": [],\n \"success 18\": [],\n }\n if msg.type == \"success 06\":\n next_workers[\"success 06\"].append(\n NextWorker(\"nowcast.workers.grib_to_netcdf\", args=[\"forecast2\"])\n )\n if msg.type == \"success 12\":\n next_workers[\"success 12\"].append(\n NextWorker(\"nowcast.workers.grib_to_netcdf\", args=[\"nowcast+\"])\n )\n return next_workers[msg.type]", "def reset(self):\n\n\t\tf = self.no_of_ferries\n\t\tt = self.no_of_discrete_time_intervals\n\t\tvmax = self.maximam_velocity_vector\n\t\tports = self.port_coordinates_vector\n\t\ttrips = self.no_of_trips_vector\n\t\thaltTime = self.halt_time_at_port\n\t\tstartBuffer = self.buffer_before_start\n\n\t\tschedule = np.array([[0.0 for x in range(t)] for y in range(f)])\n\t\t\n\t\t#Find distance from port co-ordinates\n\t\tportA = ports[0]\n\t\tportB = ports[1]\n\t\tself.dst = dst = distance.euclidean(portA, portB)\n\n\t\tfinishTime = [0.0 for x in range(f)]\n\t\tstartTime = [0.0 for x in range(f)]\n\n\t\t#Calculate total time for all ferries to complete required trips considering respective maximum velocities\n\t\tfor fIndex in range(f):\n\t\t\tif(fIndex > 0):\n\t\t\t\tstartTime[fIndex] = startTime[fIndex - 1] + startBuffer #TODO: Randomize start time\n\t\t\ttripTime = ((2 * dst * trips[fIndex])/vmax[fIndex]) + haltTime\n\t\t\tfinishTime[fIndex] = (startTime[fIndex] + tripTime)\n\n\t\tself.time_step = time_step = max(finishTime)/(t-1);\n\t\tlogging.debug(\"Time step: %f hrs\" % time_step)\n\t\tlogging.debug(\"Total time: %s hrs\" % format(max(finishTime), '.2f'))\n\n\t\tself.fSchedule = schedule = self.getLinearSchedule(schedule, startTime)\t\n\t\treturn schedule;", "def bindEvents(fusionEvents,divisionEvents, buff):\n #1/Finding correspondances\n fusion_indices = []\n fusion_labels = []\n fusion_labels_2 = [] # In label 2 says with which cell the disappearded one has\n for events,label in fusionEvents:\n index,osef,labels = events\n fusion_indices.append(index)\n fusion_labels.append(labels[0])\n fusion_labels_2.append(label)\n \n division_indices = []\n division_labels = []\n division_labels_2 = [] # Tells in which cell it is created\n for events,label in divisionEvents:\n index,osef,labels = events\n division_indices.append(index)\n division_labels.append(labels[0])\n division_labels_2.append(label)\n \n associated_division_list = []\n associated_indexes = []\n for i in fusion_indices:\n ind = next((x for x in division_indices if x>i),-1)\n if ind>0:\n associated_division_list.append((i,ind))\n corr_ind_fusion = fusion_indices.index(i)\n corr_ind_division = division_indices.index(ind)\n associated_indexes.append((corr_ind_fusion,corr_ind_division))\n\n \n #2/removing corresponding elements\n for j in range(len(associated_division_list)):\n index_fus, index_div = associated_indexes[j]\n if division_labels_2[index_div]==fusion_labels_2[index_fus]:\n #If they are not equal, means that the process of division/fusion \n #has not 
happened on the same blob and hence is not relevant\n big_label = division_labels_2[index_div]\n small_label = fusion_labels[index_fus]\n new_label = division_labels[index_div] #Replace after division this label by small label\n first_index = fusion_indices[index_fus]\n second_index = division_indices[index_div]\n \n for k in range(second_index-first_index):\n splitCell(buff,first_index+k,big_label,small_label)\n \n #Attribution of the new created cells to each one of the previous cells:\n #For this, we take the closest centroid\n #centroid of the big label\n last_image = buff[:,:,second_index]\n xs,ys = centroids2(last_image,[big_label,new_label])\n xs0,ys0 = centroids2(buff[:,:,second_index-1],[big_label,small_label])\n dist_regular = (xs0[0]-xs[0])**2 + (ys0[0]-ys[0])**2 + (xs0[1]-xs[1])**2 + (ys0[1]-ys[1])**2\n dist_inverted = (xs0[0]-xs[1])**2 + (ys0[0]-ys[1])**2 + (xs0[1]-xs[0])**2 + (ys0[1]-ys[0])**2\n \n if dist_regular>dist_inverted:\n print \"ca marche pas gael euh quoi?\"\n tmp_stack = buff[:,:,second_index:]\n tmp_stack[buff[:,:,second_index:]==big_label]=small_label\n tmp_stack[buff[:,:,second_index:]==new_label]=big_label\n buff[:,:,second_index:] = tmp_stack\n division_labels = [x if (x!=new_label and x!=big_label) else big_label if x==new_label else small_label for x in division_labels]\n fusion_labels = [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in fusion_labels]\n division_labels_2= [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in division_labels_2]\n fusion_labels_2= [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in fusion_labels_2]\n else:\n print \"ca marche bien gael\"\n \"\"\"Reassigning new labels\"\"\"\n tmp_stack = buff[:,:,second_index:]\n tmp_stack[tmp_stack==new_label] = small_label\n buff[:,:,second_index:] = tmp_stack\n division_labels = [x if x!=new_label else small_label for x in division_labels]\n fusion_labels = [x if x!=new_label else small_label for x in fusion_labels]\n division_labels_2 = [x if x!=new_label else small_label for x in division_labels_2]\n fusion_labels_2 = [x if x!=new_label else small_label for x in fusion_labels_2]", "def roulette_wheel_selection(fitness, n):\n\n # calculate standard propabilites in regard to fitness scores\n sum_of_fitness = np.sum(fitness)\n\n # since smaller is better, inverse it\n probabilities = [fit/sum_of_fitness for fit in fitness]\n\n # build cummulative probabilites\n cum_propabilites = [sum(probabilities[:i]) for i in range(1, len(probabilities)+1)]\n\n # list of indexes of selected members\n indx_list = []\n\n while len(indx_list) != n:\n\n # generate random number pepresenting the ball in the roulette\n r = random.uniform(0, 1)\n\n for indx, prob in enumerate(cum_propabilites):\n # we found the place the ball fell down\n if r <= prob:\n indx_list.append(indx)\n break\n\n return indx_list", "def _getsteps(num_of_steps, limit):\n steps = []\n current = 0.0\n for i in range(0, num_of_steps):\n if i == num_of_steps - 1:\n steps.append(int(round(limit)))\n else:\n steps.append(int(round(current)))\n current += float(limit) / float(num_of_steps - 1)\n return steps", "def scheduleusingfcfs():\n df = []\n nextProcessStart = 0\n currentDate = date.today()\n while not readyQueue.empty():\n process = readyQueue.get()\n if nextProcessStart == 0:\n nextProcessStart = process.arrivalTime\n process.completionTime = addTimes(nextProcessStart, process.burstTime)\n 
fcfsDoneList.append(process)\n\n df.append(dict(Task=process.processName, Start=str(currentDate) + \" \" + str(nextProcessStart),\n Finish=str(currentDate) + \" \"\n + str(process.completionTime)))\n\n nextProcessStart = addTimes(nextProcessStart, process.burstTime)\n\n fig = ff.create_gantt(df, title=\"First Come, First Served\")\n fig.write_image(\"./fcfs.png\")\n\n # Calculate turnaround time and waiting time\n calculateTurnaroundAndWaitingTime(fcfsDoneList)\n\n drawTable(fcfsDoneList, \"fcfsTable.png\")", "def calc_f_RL(self, readOnly=False, do_solvation=True, redo=False):\n if self.data['CD'].protocol == []:\n return # Initial CD is incomplete\n\n # Initialize variables as empty lists or by loading data\n if self.args.params['CD']['pose'] == -1:\n f_RL_FN = os.path.join(self.args.dir['CD'], 'f_RL.pkl.gz')\n else:\n f_RL_FN = os.path.join(self.args.dir['CD'], \\\n 'f_RL_pose%03d.pkl.gz'%self.args.params['CD']['pose'])\n\n dat = load_pkl_gz(f_RL_FN)\n if (dat is not None):\n (self.f_L, self.stats_RL, self.f_RL, self.B) = dat\n else:\n self._clear_f_RL()\n if readOnly:\n return True\n\n if redo:\n for key in self.f_RL.keys():\n if key != 'grid_MBAR':\n self.f_RL[key] = []\n self.B = {'MMTK_MBAR': []}\n for phase in self.args.params['CD']['phases']:\n for method in ['min_Psi', 'mean_Psi', 'EXP', 'MBAR']:\n self.B[phase + '_' + method] = []\n\n # Make sure all the energies are available\n for c in range(self.data['CD'].cycle):\n if len(self.data['CD'].Es[-1][c].keys()) == 0:\n self.log.tee(\" skipping the binding PMF calculation\")\n return\n if not hasattr(self, 'f_L'):\n self.log.tee(\" skipping the binding PMF calculation\")\n return\n\n start_string = \"\\n>>> Complex free energy calculations, starting at \" + \\\n time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime()) + \"\\n\"\n self.log.recordStart('BPMF')\n\n updated = False\n\n def set_updated_to_True(updated, start_string, quiet=False):\n if (updated is False):\n self.log.set_lock('CD')\n if not quiet:\n self.log.tee(start_string)\n return True\n\n K = len(self.data['CD'].protocol)\n\n # Store stats_RL\n # Internal energies\n self.stats_RL['u_K_sampled'] = \\\n [self._u_kln([self.data['CD'].Es[-1][c]],[self.data['CD'].protocol[-1]]) \\\n for c in range(self.data['CD'].cycle)]\n self.stats_RL['u_KK'] = \\\n [np.sum([self._u_kln([self.data['CD'].Es[k][c]],[self.data['CD'].protocol[k]]) \\\n for k in range(len(self.data['CD'].protocol))],0) \\\n for c in range(self.data['CD'].cycle)]\n\n # Interaction energies\n for c in range(len(self.stats_RL['Psi_grid']), self.data['CD'].cycle):\n self.stats_RL['Psi_grid'].append(\n (self.data['CD'].Es[-1][c]['LJr'] + \\\n self.data['CD'].Es[-1][c]['LJa'] + \\\n self.data['CD'].Es[-1][c]['ELE'])/(R*self.T_TARGET))\n updated = set_updated_to_True(updated,\n start_string,\n quiet=not do_solvation)\n\n # Estimate cycle at which simulation has equilibrated\n eqc_o = self.stats_RL['equilibrated_cycle']\n self.stats_RL['equilibrated_cycle'] = self._get_equilibrated_cycle('CD')\n if self.stats_RL['equilibrated_cycle'] != eqc_o:\n updated = set_updated_to_True(updated,\n start_string,\n quiet=not do_solvation)\n\n # Store rmsd values\n if (self.args.params['CD']['rmsd'] is not False):\n k = len(self.data['CD'].protocol) - 1\n for c in range(self.data['CD'].cycle):\n if not 'rmsd' in self.data['CD'].Es[k][c].keys():\n confs = [conf for conf in self.data['CD'].confs['samples'][k][c]]\n self.data['CD'].Es[k][c]['rmsd'] = self.get_rmsds(confs)\n self.stats_RL['rmsd'] = 
[(np.hstack([self.data['CD'].Es[k][c]['rmsd']\n if 'rmsd' in self.data['CD'].Es[k][c].keys() else [] \\\n for c in range(self.stats_RL['equilibrated_cycle'][-1], \\\n self.data['CD'].cycle)])) \\\n for k in range(len(self.data['CD'].protocol))]\n\n # Calculate CD free energies that have not already been calculated\n while len(self.f_RL['grid_MBAR']) < self.data['CD'].cycle:\n self.f_RL['grid_MBAR'].append([])\n while len(self.stats_RL['mean_acc']) < self.data['CD'].cycle:\n self.stats_RL['mean_acc'].append([])\n\n for c in range(self.data['CD'].cycle):\n # If solvation free energies are not being calculated,\n # only calculate the grid free energy for the current cycle\n if (not do_solvation) and c < (self.data['CD'].cycle - 1):\n continue\n if self.f_RL['grid_MBAR'][c] != []:\n continue\n\n fromCycle = self.stats_RL['equilibrated_cycle'][c]\n extractCycles = range(fromCycle, c + 1)\n\n # Extract relevant energies\n CD_Es = [Es[fromCycle:c+1] \\\n for Es in self.data['CD'].Es]\n\n # Use MBAR for the grid scaling free energy estimate\n (u_kln, N_k) = self._u_kln(CD_Es, self.data['CD'].protocol)\n MBAR = self.run_MBAR(u_kln, N_k)[0]\n self.f_RL['grid_MBAR'][c] = MBAR\n updated = set_updated_to_True(updated,\n start_string,\n quiet=not do_solvation)\n\n self.log.tee(\" calculated grid scaling free energy of %.2f RT \"%(\\\n self.f_RL['grid_MBAR'][c][-1])+\\\n \"using cycles %d to %d\"%(fromCycle, c))\n\n # Average acceptance probabilities\n mean_acc = np.zeros(K - 1)\n for k in range(0, K - 1):\n (u_kln, N_k) = self._u_kln(CD_Es[k:k + 2],\n self.data['CD'].protocol[k:k + 2])\n N = min(N_k)\n acc = np.exp(-u_kln[0, 1, :N] - u_kln[1, 0, :N] + u_kln[0, 0, :N] +\n u_kln[1, 1, :N])\n mean_acc[k] = np.mean(np.minimum(acc, np.ones(acc.shape)))\n self.stats_RL['mean_acc'][c] = mean_acc\n\n if not do_solvation:\n if updated:\n if not self.log.run_type.startswith('timed'):\n self.log.tee(write_pkl_gz(f_RL_FN, \\\n (self.f_L, self.stats_RL, self.f_RL, self.B)))\n self.log.clear_lock('CD')\n return True\n\n # Make sure postprocessing is complete\n from AlGDock.postprocessing import Postprocessing\n pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run()\n if not pp_complete:\n return False\n self.calc_f_L()\n\n # Make sure all the phase energies are available\n for c in range(self.data['CD'].cycle):\n for phase in self.args.params['CD']['phases']:\n for prefix in ['L', 'RL']:\n if not prefix + phase in self.data['CD'].Es[-1][c].keys():\n self.log.tee(\" postprocessed energies for %s unavailable\" % phase)\n return\n\n # Store stats_RL internal energies for phases\n for phase in self.args.params['CD']['phases']:\n self.stats_RL['u_K_'+phase] = \\\n [self.data['CD'].Es[-1][c]['RL'+phase][:,-1]/(R*self.T_TARGET) \\\n for c in range(self.data['CD'].cycle)]\n\n # Interaction energies\n for phase in self.args.params['CD']['phases']:\n if (not 'Psi_' + phase in self.stats_RL):\n self.stats_RL['Psi_' + phase] = []\n for c in range(len(self.stats_RL['Psi_' + phase]),\n self.data['CD'].cycle):\n self.stats_RL['Psi_'+phase].append(\n (self.data['CD'].Es[-1][c]['RL'+phase][:,-1] - \\\n self.data['CD'].Es[-1][c]['L'+phase][:,-1] - \\\n self.args.original_Es[0][0]['R'+phase][:,-1])/(R*self.T_TARGET))\n\n # Predict native pose\n if self.args.params['CD']['pose'] == -1:\n (self.stats_RL['pose_inds'], self.stats_RL['scores']) = \\\n self._get_pose_prediction()\n\n # BPMF assuming receptor and complex solvation cancel\n self.B['MMTK_MBAR'] = 
[-self.f_L['BC_MBAR'][-1][-1] + \\\n self.f_RL['grid_MBAR'][c][-1] for c in range(len(self.f_RL['grid_MBAR']))]\n\n # BPMFs\n for phase in self.args.params['CD']['phases']:\n for key in [phase + '_solv']:\n if not key in self.f_RL:\n self.f_RL[key] = []\n for method in ['min_Psi', 'mean_Psi', 'EXP', 'MBAR']:\n if not phase + '_' + method in self.B:\n self.B[phase + '_' + method] = []\n\n # Receptor solvation\n f_R_solv = self.args.original_Es[0][0]['R' + phase][:, -1] / (\n R * self.T_TARGET)\n\n for c in range(len(self.B[phase + '_MBAR']), self.data['CD'].cycle):\n updated = set_updated_to_True(updated, start_string)\n extractCycles = range(self.stats_RL['equilibrated_cycle'][c], c + 1)\n\n # From the full grid to the fully bound complex in phase\n u_RL = np.concatenate([\\\n self.data['CD'].Es[-1][c]['RL'+phase][:,-1]/(R*self.T_TARGET) \\\n for c in extractCycles])\n u_sampled = np.concatenate([\\\n self.stats_RL['u_K_sampled'][c] for c in extractCycles])\n\n du = u_RL - u_sampled\n min_du = min(du)\n weights = np.exp(-du + min_du)\n\n # Filter outliers\n if self.args.params['CD']['pose'] > -1:\n toKeep = du > (np.mean(du) - 3 * np.std(du))\n du = du[toKeep]\n weights[~toKeep] = 0.\n\n weights = weights / sum(weights)\n\n # Exponential average\n f_RL_solv = -np.log(np.exp(-du + min_du).mean()) + min_du - f_R_solv\n\n # Interaction energies\n Psi = np.concatenate([self.stats_RL['Psi_'+phase][c] \\\n for c in extractCycles])\n min_Psi = min(Psi)\n max_Psi = max(Psi)\n\n # Complex solvation\n self.f_RL[phase + '_solv'].append(f_RL_solv)\n\n # Various BPMF estimates\n self.B[phase + '_min_Psi'].append(min_Psi)\n self.B[phase + '_mean_Psi'].append(np.sum(weights * Psi))\n self.B[phase+'_EXP'].append(\\\n np.log(sum(weights*np.exp(Psi-max_Psi))) + max_Psi)\n\n self.B[phase+'_MBAR'].append(\\\n - self.f_L[phase+'_solv'][-1] - self.f_L['BC_MBAR'][-1][-1] \\\n + self.f_RL['grid_MBAR'][-1][-1] + f_RL_solv)\n\n self.log.tee(\" calculated %s binding PMF of %.5g RT with cycles %d to %d\"%(\\\n phase, self.B[phase+'_MBAR'][-1], \\\n self.stats_RL['equilibrated_cycle'][c], c))\n\n if updated:\n self.log.tee(\n write_pkl_gz(f_RL_FN, (self.f_L, self.stats_RL, self.f_RL, self.B)))\n self.log.tee(\"\\nElapsed time for binding PMF estimation: \" + \\\n HMStime(self.log.timeSince('BPMF')))\n self.log.clear_lock('CD')", "def guess_baudrate_divisor(klass, config):\n\t\t# build a list of all the millisecond time values to check\n\t\ttimes_tuples_keys = ['header', 'three', 'two', 'one', 'zero',\n\t\t 'foot', 'repeat', 'pre', 'post']\n\t\ttimes_keys = ['ptrail', 'phead', 'gap', 'repeat_gap']\n\t\ttimes = []\n\t\ttimes.append(1000000)\t# can't have a baudrate of less than 1\n\t\tfor k in times_tuples_keys:\n\t\t\tif k in config:\n\t\t\t\ttimes.append(config[k][0])\n\t\t\t\ttimes.append(config[k][1])\n\t\tfor k in times_keys:\n\t\t\tif k in config:\n\t\t\t\ttimes.append(config[k])\n\t\t# try finding a common factor among them all\n\t\ttotal_factor = 1\n\t\tfactored = True\n\t\twhile factored:\n\t\t\tfactored = False\n\t\t\tfactor_errors = {}\n\t\t\tfor factor in [2,3,5,7,11,17,19,23]:\n\t\t\t\tnew_factor = 1.0 * total_factor * factor\n\t\t\t\tint_times = [int(t / new_factor) for t in times]\n\t\t\t\trounded_times = [t * new_factor for t in int_times]\n\t\t\t\terrors = [abs(o-n)*1.0/o for o,n in zip(times, rounded_times)]\n\t\t\t\tfactor_errors[factor] = max(errors)\n\t\t\t# figure out which factor had the least error\n\t\t\tnew_factor = None\n\t\t\tmin_error = 0.05\n\t\t\tfor f,e in 
factor_errors.items():\n\t\t\t\tif e < min_error:\n\t\t\t\t\tmin_error = e\n\t\t\t\t\tnew_factor = total_factor * f\n\t\t\tif new_factor:\n\t\t\t\tfactored = True\n\t\t\t\ttotal_factor = new_factor\n\t\treturn total_factor", "def divide(mother, AgEpitope, tnow, mut_list, RNs):\n dlist = []\n # get new sequences, additional mutation counts and block status\n # for the daughters; mutations may happen during division ONLY if\n # the cell's family has been in the GC for long enough to have enough AID\n if ((tnow - mother.AIDstart) >= cf.tAID): # mutations can happen\n seq1, mutcount1, block1 = mutate_seq(mother.sequence[:],\n mother.block, RNs)\n seq2, mutcount2, block2 = mutate_seq(mother.sequence[:],\n mother.block, RNs)\n else: # mutational programme is not switched on yet (daughter=mother)\n seq1, mutcount1, block1 = mother.sequence[:], 0, mother.block\n seq2, mutcount2, block2 = mother.sequence[:], 0, mother.block\n\n num_muts = 0\n num_ben = 0\n # make new Bcell objects if sequences are okay\n if seq1 is not None:\n # if cell is blocked, affinity <= affinity0\n if not block1:\n Emax = E_best(seq1, AgEpitope)\n else:\n Emax = min(E_best(mother.sequence0, AgEpitope),\n E_best(seq1, AgEpitope))\n daughter1 = Bcell(sequence=seq1, sequence0=mother.sequence0[:],\n affinity=Emax, affinity0=mother.affinity0,\n origin=mother.origin,\n mutations=mother.mutations + mutcount1,\n family=mother.family, birthtime=mother.birthtime,\n GCentrytime=tnow,\n AIDstart=mother.AIDstart, block=block1)\n dlist.append(daughter1)\n # mutation counting\n num_muts += mutcount1\n if Emax > mother.affinity:\n num_ben += 1\n\n if seq2 is not None:\n # if cell is blocked, affinity <= affinity0\n if not block2:\n Emax = E_best(seq2, AgEpitope)\n else:\n Emax = min(E_best(mother.sequence0, AgEpitope),\n E_best(seq2, AgEpitope))\n daughter2 = Bcell(sequence=seq2, sequence0=mother.sequence0[:],\n affinity=Emax, affinity0=mother.affinity0,\n origin=mother.origin,\n mutations=mother.mutations + mutcount2,\n family=mother.family, birthtime=mother.birthtime,\n GCentrytime=tnow,\n AIDstart=mother.AIDstart, block=block2)\n dlist.append(daughter2)\n # mutation counting\n num_muts += mutcount2\n if Emax > mother.affinity:\n num_ben += 1\n\n mut_list.append((tnow, mother.family, num_muts, num_ben))\n del mother\n return dlist, mut_list", "def downscale(n, ldd, stream_thres, conv_factor, logger, ch):\n logger.info(str(\"Processing volume_t.\" + \"%03.f\" % (n+1)))\n volMapFile = os.path.join(downscaleLoc,str(\"volume_t.\" + \"%03.f\") % (n+1))\n volume_target = readmap(volMapFile)\n stream = streamorder(ldd) # make a stream order map\n # make a river-map, rivers are streams with strahler order < the largest order - a threshold\n # rivers = ifthenelse(scalar(stream) < mapmaximum(scalar(stream)) - stream_thres,boolean(0), boolean(stream))\n rivers = ifthenelse(scalar(stream) < stream_thres,boolean(0), boolean(stream))\n report(rivers,os.path.join(downscaleLoc,'rivers.map'))\n # initialize loop\n floodedLand = volume_target*0\n count = 0\n floodHeightInactiveCells = volume_target*0\n # now iterate in a loop, 15 meters is assumed to be the largest inundation level possible. 
Increase by steps of 0.3\n # check volume of cells taken into consideration\n volInRiver = ifthenelse(rivers==1,volume_target,scalar(0))\n volInLargeCells = areamaximum(volInRiver,ordinal(uniqueid_target))\n for level in arange(0.0,30,0.1):\n logger.debug('Processing with inundation depth = ' + str(level))\n\n \"\"\"\n Below, a short explanation of the maps, generated in this PCRaster routine is given. The principle idea is to impose a certain water level on river cells\n an check where the backwater of this imposed height may go to upstream through use of the local drain directions and elevation map\n The routine also checks where the imposed water in each cell comes from (i.e. from which 0.5 degree cell).\n In the end, the total volume of backwater from each 0.5 deg. cell is computed and compared to PCRGLOB volumes.\n If the imposed volume exceeds the PCRGLOB volume, the 0.5 deg. cell is assumed to be 'depleted' and the river cells are excluded from\n the river network in further processing steps. In the next step, a slightly higher level is imposed and the volume check is repeated.\n Hence, more downstream cells may impose backwater on the target cells under consideration in later steps.\n In the end of the routine, all volumes of each pcrglob cell should be accounted for in the downscaled map.\n\n floodInRiver: flood level, with resp. to MSL imposed on the river network map\n floodInRiverUpstream: the flood level of floodInRiver, imposed on the upstream area of each river pixel\n idInRiver: id of te 0.5 deg. cell, imposed on the river network map\n idInRiverUpstream: id imposed on the upstream area of each river cell.\n volInRiver: the volume of flood water in each 0.5 deg. pixel, imposed on the river network\n volInRiverUpstream: flood water volume, imposed on the upstream area of each river pixel\n areaInRiver: cell size, imposed on river network map\n areaInRiverUpstream: total surface area of areas with the same idInRiverUpstream value\n floodedLandTemp: The water level in areas, which would occur if the current 'level' was imposed on the river network\n floodedLandAv: The flooded water level, averaged over the idInRiverUpstream areas\n floodedLandTotal: The total volume of flood water in each contiguous area of idInRiverUpstream\n floodedLand: A volume comparison is made between floodedLandTotal and volInRiverUpstream.\n If floodedLandTotal is smaller, then the amount of imposed water will be smaller then the\n volume, computed by PCRGLOB in the 0.5 degree area. The inundation height in the cell will be updated in floodedLand.\n If the volume is exceeded, the cell will not be updated and the river cells in this area will be removed.\n Hence, backwater from more downstream cells can still impact on the cell under consideration.\n\n TO-DO: als een cel inactief wordt, dan gaat een benedenstroomse cel ineens heel veel water dumpen op deze plaatsen met als gevolg, mogelijk ernstige overschrijding van het volume uit die cel.\n \"\"\"\n floodInRiver = ordinal((ifthenelse(rivers==1,scalar(level)+dem,scalar(0)))*100)\n idInRiver = ordinal(ifthenelse(rivers==1,uniqueid_target,scalar(0)))\n volInRiver = ifthenelse(rivers==1,volume_target,scalar(0))\n areaInRiver = ifthenelse(rivers==1,surf,scalar(0))\n floodInRiverUpstream = subcatchment(ldd,floodInRiver)\n idInRiverUpstream = subcatchment(ldd,idInRiver)\n if level > 0:\n changedSourceCells = ifthenelse(idInRiverOld != idInRiverUpstream, boolean(1),boolean(0)) # if a different 0.5 deg. 
area is the source of floods\n floodHeightInactiveCells = ifthenelse(changedSourceCells,floodedLand,floodHeightInactiveCells)\n volInRiverUpstream = areamaximum(volInRiver,idInRiverUpstream)\n areaInRiverUpstream = areatotal(areamaximum(areaInRiver,idInRiverUpstream),idInRiverUpstream) # compute total catchment area of Id cell\n floodedLandTemp = min(max(scalar(floodInRiverUpstream)/100-dem,0),level)\n floodedLandTempAv = areaaverage(max(floodedLandTemp - floodHeightInactiveCells, 0),idInRiverUpstream)\n floodedLandTotal = floodedLandTempAv*areaInRiverUpstream\n # check which cells have a changed source area of .5 degrees and subtract the volume there\n floodedLand = ifthenelse(floodedLandTotal < volInRiverUpstream, max(scalar(floodedLandTemp),scalar(floodedLand)), scalar(floodedLand))# hieronder uitrekenen of volume al meer is dan eerder of niet.\n # update relevant river streams (exclude the ones that are already saturated)\n rivers = ifthenelse(floodedLandTotal < volume_target, rivers, boolean(0))\n idInRiverOld = idInRiverUpstream\n\n vol_pcrglob = pcr2numpy(volInLargeCells,0)/conv_factor\n vol_pcr = vol_pcrglob.sum()\n volmodelled = pcr2numpy(floodedLand*surf,0)\n vol_mod = volmodelled.sum()\n #\n logger.info(str('volume_t.' + '%03.f' + ': Volume PCRGLOB: ' + '%03.3f' + 'km3, Volume downscaling: ' + '%03.3f' + 'km3' + ', perc. diff: ' + '%2.2f' + '%%') % (n+1,vol_pcr/1e9, vol_mod/1e9, (vol_mod-vol_pcr)/vol_pcr*100))\n return logger, ch, floodedLand\n # end of function part", "def after_make_fvcom_rivers_forcing(msg, config, checklist):\n next_workers = {\n \"crash\": [],\n \"failure x2 nowcast\": [],\n \"failure r12 nowcast\": [],\n \"success x2 nowcast\": [],\n \"success r12 nowcast\": [],\n }\n return []", "def doCalculation(self, E1, E2, muL, muR, T, pot, C, TCalc, Density, E0, L):\n NEcut = len(E1) #we determine the number of single-particle states that we use\n VG=np.diag(pot)\n E= int(0.5*np.size(VG))\n V = VG[0:E] #since the potential of both barriers is symmetric and we only tunnel through one barrier. Therefore we only use one half of the potential.\n dx= L/(np.size(pot))\n\n #Following prints are for debugging purposes:\n #print(\"---------------------------------------------------------------------\")\n #print(\"---------------------------------------------------------------------\")\n #print(\"Hier beginnt die Ausgabe von Rates:\")\n #print(\"---------------------------------------------------------------------\")\n #print(\"V:\", V)\n #print(\"E1:\", E1)\n #print(\"E2:\", E2)\n #print(\"C:\", C)\n\n kB=0.08629 #Boltzmann constant in meV/K\n \n \n def fermi(E,mu,T):\n \"\"\"This fermi-function tells us with which likelyhood a state with an E is occupied on the lead.\n E(float): energy difference between the initial and the final state that the tunneling electron has to carry.\n mu(float): chemical potential of either drain(muR) or source(muL).\n T(float): temperature.\n \"\"\"\n if (E-mu)/T > 600:\n f=0\n\t\t\t\t\n else:\n f=1/(math.exp((E-mu)/(kB*T) )+1)\n return(f)\n \n\n\t#This function is called by the Gamma_ij-equations and includes the transmission-coefficient for each tunnelling-event\n #and the density of state function of the source and drain. 
\n def Gamma(Ea,Eb,V):\n \"\"\":math:`\\\\Gamma` includes the transmission coefficient and DOS: :math:`\\Gamma = | t |^2 * DOS`\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n V(np.array): barrier potential\n \"\"\"\n #print(Ea)\n #print(V)\n return (np.absolute(TCalc.calculate_transmission(Ea,V,dx))**2*Density.calculate_DensityofStates(np.absolute(Ea-Eb)))\n \n #These next four functions are used to calculate the transition rates.Each function for a different kind of transition:\n #We distinguish between transitions, in which the number of electrons on the dot changes from one to two(Gamma_12) and reverse(Gamma_21).\n #And between transitions in which the number of electrons on the dot change from zero to one(Gamma_01) and reverse(Gamma_10).\n\n def Gamma_12(Ea,Eb,mu,T):\n \"\"\"Calculates the rate of a transition from a one body state to a two body state.\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n summe=0\n j=0\n Cb=C[np.where(E2==Eb)[0][0]]\n while j< NEcut:\n summe=Cb[np.where(E1==Ea)[0][0]][j]+summe\n j=j+1\n return(Gamma(Ea,Eb,V)*(np.absolute(summe))**2*fermi((Eb-Ea),mu,T))\n\n\n def Gamma_01(Eb,mu,T):\n \"\"\"Calculates the transition rate from the vacuum state to a one-body state.\n\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n return(Gamma(E0,Eb,V)*fermi((Eb-E0),mu,T))\n\n def Gamma_21(Ea,Eb,mu,T):\n \"\"\"Calculates the rate of a transition from a two body state to a one body state.\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n summe=0\n nu=0\n Ca=C[np.where(E2==Ea)[0][0]]\n while nu < NEcut:\n summe=summe+Ca[np.where(E1==Eb)[0][0]][nu]\n nu=nu+1\n return(Gamma(Ea,Eb,V)*(np.absolute(summe))**2*(1-fermi((Ea-Eb),mu,T)))\n\n def Gamma_10(Ea,mu,T):\n \"\"\"Calculates the rate of a transition from a one body state to the vacuum state.\n\n Ea(float): energy of initial state \n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n return(Gamma(Ea,E0,V)*(1-fermi((Ea-E0),mu,T)))\n\n #creating the output matrices that later contain all the transition rates through either\n #the left or the right barrier\n Gamma_R=np.zeros((1+np.size(E1)+np.size(E2),1+np.size(E1)+np.size(E2)))\n Gamma_L=np.zeros((1+np.size(E1)+np.size(E2),1+np.size(E1)+np.size(E2)))\n\n #using a loop to fill the output matrices with transition rates.\n i_=0\n for i in E1:\n j_=0\n for j in E2:\n Gamma_L[i_+1][j_+1+np.size(E1)]=Gamma_12(i,j,muL,T)\n Gamma_L[j_+1+np.size(E1)][i_+1]=Gamma_21(j,i,muL,T)\n Gamma_R[i_+1][j_+1+np.size(E1)]=Gamma_12(i,j,muR,T)\n Gamma_R[j_+1+np.size(E1)][i_+1]=Gamma_21(j,i,muR,T)\n j_=j_+1\n Gamma_L[0][i_+1]=Gamma_10(i,muL,T)\n Gamma_R[0][i_+1]=Gamma_10(i,muR,T)\n Gamma_L[i_+1][0]=Gamma_01(i,muL,T)\n Gamma_R[i_+1][0]=Gamma_01(i,muR,T)\n i_=1+i_\n\n #print(\"Gamma_L und Gamma_R:\")\n #print(Gamma_L,Gamma_R)\n #print(\"-----------------------------------------------------------------------\")\n #print(\"---------------------------------------------------------------------\")\n return(Gamma_L,Gamma_R)", "def select_best_chanels():\r\n \r\n \r\n all_paths = [['data_bci\\\\row_data\\\\subject1\\\\'], 
['data_bci\\\\row_data\\\\subject2\\\\'],['data_bci\\\\row_data\\\\subject3\\\\']]\r\n\r\n train_subjects = ['01']\r\n test_subject = '02'\r\n freq = 512\r\n\r\n cutoff_beggining = 0\r\n columns_to_read = ['Fp1', 'AF3' ,'F7', 'F3', 'FC1', 'FC5', 'T7', 'C3', 'CP1', 'CP5',\r\n 'P7', 'P3', 'Pz', 'PO3', 'O1', 'Oz', 'O2', 'PO4', 'P4', 'P8', 'CP6',\r\n 'CP2', 'C4', 'T8', 'FC6', 'FC2', 'F4', 'F8', 'AF4', 'Fp2', 'Fz', 'Cz','class']\r\n seq_len = 0\r\n cut_step = 0\r\n num_perseg = freq\r\n num_overlap = int(num_perseg/2)\r\n min_freq=8\r\n max_freq=45\r\n \r\n chanels_rank = rank_chanels()\r\n \r\n result = []\r\n for i in range(1, len(chanels_rank)):\r\n intermidiate_result = []\r\n for path in all_paths:\r\n train_full_data, train_full_data_filtered, train_full_anots, test_full_data, test_full_filtered, test_full_annoations = read_filter(path, train_subjects,test_subject, columns_to_read, cutoff_beggining, seq_len, cut_step)\r\n\r\n train_psd_signals = eval_psd_not_modulated(train_full_data, num_perseg, num_overlap, freq, min_freq, max_freq)\r\n test_psd_signals = eval_psd_not_modulated(test_full_data, num_perseg, num_overlap, freq, min_freq, max_freq) \r\n\r\n train_psd_signals = flatten_data(train_psd_signals[:,:,chanels_rank[:i]])\r\n test_psd_signals = flatten_data(test_psd_signals[:,:,chanels_rank[:i]])\r\n \r\n acc = evalute_subset(train_psd_signals, test_psd_signals, train_full_anots, test_full_annoations)\r\n intermidiate_result.append(acc)\r\n \r\n result.append(intermidiate_result)\r\n #mean_subject_acc = np.array([sum(humans_acc)/len(humans_acc) for humans_acc in result])\r\n #best_idx = np.argmax(mean_subject_acc)\r\n\r\n return result, chanels_rank", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, 
time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def drive(self, lrtList):\n self.busy = True\n self.myMotor.enable()\n\n sleepTime = 0\n self.lmp = 0\n self.rmp = 0\n\n for lrt in lrtList:\n\n if lrt.left > 127: # left is going in reverse\n self.lmp = round(254 * (((lrt.left - 128))/127))\n self.lmd = self.BWD\n \n else: # left is going forward\n self.lmp = round(254 * (((lrt.left))/127))\n self.lmd = self.FWD\n \n if lrt.right > 127: # right is going in reverse\n self.rmp = round(254 * (((lrt.right - 128))/127))\n self.rmd = self.BWD\n \n else: # right is going forward\n self.rmp = round(254 * (((lrt.right))/127))\n self.rmd = self.FWD\n \n sleepTime = (lrt.time * 10) / 1000\n self.myMotor.set_drive(self.L_MTR, self.lmd, self.lmp)\n self.myMotor.set_drive(self.R_MTR, self.rmd, self.rmp)\n time.sleep(sleepTime)\n \n self.myMotor.set_drive(0,0,0)\n self.myMotor.set_drive(1,0,0)\n self.myMotor.disable()\n self.busy = False", "def sub_division(width: float, minimum_division: float, stretch_factor: float) -> list:\n\n sum_x = 0\n next_ = minimum_division\n new_grid = []\n max_dx = 20/100\n x = width/2\n\n while sum_x < x:\n remaining = x - sum_x\n\n 
if next_ > max_dx:\n n = np.ceil(remaining/max_dx)\n\n if n == 0:\n new_grid.append(remaining)\n\n next_ = remaining/n\n\n for _ in range(0, int(n)):\n new_grid.append(next_)\n sum_x += next_\n\n remaining = x - sum_x\n\n if next_ < remaining:\n new_grid.append(next_)\n sum_x += next_\n else:\n remaining += new_grid[-1]\n new_grid[-1] = remaining/2\n new_grid.append(remaining/2)\n sum_x = x\n\n next_ = next_ * stretch_factor\n\n x1 = new_grid[::-1]\n x2 = new_grid+x1\n\n return x2", "def fitness_proportional_selection(self) -> List[Character]:\n print(' - selection')\n st = time.time()\n\n # open pool the the amount of cpu cores\n pool = mp.Pool(mp.cpu_count())\n\n # create a character at each position of the characters list\n new_list = pool.map(create_character, [i for i in self.characters])\n\n # close pool and release the cores\n pool.close()\n\n self.characters = new_list\n self.get_diversity()\n self.calc_sum_fitness()\n self.calc_average_fitness()\n self.get_best_fitness()\n self.get_worst_fitness()\n\n # create the wheel as dict with the selection chance and the character\n wheel: Dict[float, Character] = {}\n\n # the new generation\n new_generation: List[Character] = []\n fit_c_generation: float = 0\n new_wheel = {}\n \"\"\"get the chance of all characters to be selected\n \n \"\"\"\n for c in self.characters:\n p_chance = c.fitness / self.sum_fitness\n chance = p_chance * self.size\n s = str(chance)\n s = s.split('.')\n r = int(s[0])\n f_c = '0.' + s[1]\n f_c = float(f_c)\n fit_c_generation += f_c\n if r <= 0:\n wheel[f_c] = c\n while r > 0:\n new_character = copy.deepcopy(c)\n new_generation.append(new_character)\n r -= 1\n\n for k, v in wheel.items():\n new_key = (k / fit_c_generation) * self.size\n new_wheel[new_key] = v\n\n while len(new_generation) < self.size:\n for k in sorted(new_wheel, reverse=True):\n chance = random.uniform(0, fit_c_generation)\n if chance <= k:\n new_character = copy.deepcopy(new_wheel[k])\n new_generation.append(new_character)\n if len(new_generation) >= self.size:\n break\n continue\n e = time.time()\n print(\" - time: \", e - st)\n\n return new_generation" ]
[ "0.611805", "0.5760464", "0.51817936", "0.5052647", "0.500325", "0.49627137", "0.49470758", "0.48633406", "0.4851506", "0.48268813", "0.48119494", "0.47923657", "0.47850725", "0.4731373", "0.46988776", "0.46939072", "0.4659001", "0.46333724", "0.4608515", "0.46050254", "0.4599794", "0.45920628", "0.45874822", "0.4585595", "0.4581821", "0.45705387", "0.45660844", "0.45593894", "0.45549697", "0.45294163" ]
0.7092034
0
Monkeypatches the graphql-core library to trace execution of graphql calls.
def patch(): logger.debug('Patching `graphql.graphql` function.') wrapt.wrap_function_wrapper(graphql, 'graphql', _traced_graphql)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _traced_graphql(func, _, args, kwargs):\n\n schema = args[0]\n\n # get the query as a string\n if len(args) > 1:\n request_string = args[1]\n else:\n request_string = kwargs.get('request_string')\n\n if isinstance(request_string, Document):\n query = request_string.loc.source.body\n else:\n query = request_string\n\n # allow schemas their own tracer with fall-back to the global\n tracer = getattr(schema, 'datadog_tracer', ddtrace.tracer)\n\n if not tracer.enabled:\n return func(*args, **kwargs)\n\n with tracer.trace(\n RES,\n span_type=TYPE,\n service=SERVICE,\n resource=_resolve_query_res(query)\n ) as span:\n span.set_tag(QUERY, query)\n result = None\n try:\n result = func(*args, **kwargs)\n return result\n finally:\n # `span.error` must be integer\n span.error = int(result is None or result.invalid)\n if result is not None:\n span.set_tag(ERRORS, result.errors)\n span.set_metric(INVALID, int(result.invalid))", "def _trace(self):\n self.__aceQLHttpApi.trace()", "def quiet_graphql():\n def null_excepthook(cls, exc, tb):\n pass\n global logger, null_handler, saved_excepthook\n saved_excepthook = sys.excepthook\n sys.excepthook = null_excepthook\n logger = logging.getLogger('graphql.execution.executor')\n null_handler = None\n if not logger.hasHandlers():\n null_handler = logging.NullHandler()\n logger.addHandler(null_handler)", "def trace(self, s):\n self.__aceQLHttpApi.trace(s)", "def trace(self, *args, **kwargs): # real signature unknown\n pass", "def trace(self, trace=...):\n ...", "def _run_query(self):", "def set_trace():\n Bdb().set_trace()", "def wrapGraph(self, query) :\n\t\tif self.graph :\n\t\t\treturn \" GRAPH <%s> { %s } \" % (self.graph, query)\n\t\telse :\n\t\t\treturn query", "def trace_pipeline(pipe):\n _patch_multi_exec_execute(pipe)", "def rpc(self) -> global___Rpc:", "def make_query(self):", "def query(gp, *args):\n\n def decorator(f):\n add_query_pattern(gp, f, args)\n\n return decorator", "def gdb(*args):\n _gdb_python_call_gen('gdb', *args)()", "def dbtrace_ui():\n\n pass", "def log_sql(db):\n import types\n\n execute_sql = db.execute_sql\n\n def _execute_sql(self, *args, **kwargs):\n _logger.debug(args)\n _logger.debug(kwargs)\n return execute_sql(*args, **kwargs)\n\n db.execute_sql = types.MethodType(_execute_sql, db)", "def _tree_query_parallel_helper(tree, *args, **kwargs):\n return tree.query(*args, **kwargs)", "def prolog(self, *args):\n return _ida_hexrays.Hexrays_Hooks_prolog(self, *args)", "def execute_query(self, *args, **kwargs):", "def unquiet_graphql():\n global null_handler\n sys.excepthook = saved_excepthook\n if null_handler:\n logger.removeHandler(null_handler)\n null_handler = None", "def _inject_trace_middleware_to_args(trace_middleware, args, kwargs):\n # type: (Callable, Tuple, Dict) -> Tuple[Tuple, Dict]\n middlewares_arg = 8\n if _graphql_version >= (3, 2):\n # middleware is the 10th argument graphql.execute(..) version 3.2+\n middlewares_arg = 9\n\n # get middlewares from args or kwargs\n try:\n middlewares = get_argument_value(args, kwargs, middlewares_arg, \"middleware\") or []\n if isinstance(middlewares, MiddlewareManager):\n # First we must get the middlewares iterable from the MiddlewareManager then append\n # trace_middleware. For the trace_middleware to be called a new MiddlewareManager will\n # need to initialized. 
This is handled in graphql.execute():\n # https://github.com/graphql-python/graphql-core/blob/v3.2.1/src/graphql/execution/execute.py#L254\n middlewares = middlewares.middlewares # type: Iterable\n except ArgumentError:\n middlewares = []\n\n # Note - graphql middlewares are called in reverse order\n # add trace_middleware to the end of the list to wrap the execution of resolver and all middlewares\n middlewares = list(middlewares) + [trace_middleware]\n\n # update args and kwargs to contain trace_middleware\n args, kwargs = set_argument_value(args, kwargs, middlewares_arg, \"middleware\", middlewares)\n return args, kwargs", "def _resolver_middleware(next_middleware, root, info, **args):\n pin = Pin.get_from(graphql)\n if not pin or not pin.enabled():\n return next_middleware(root, info, **args)\n\n with pin.tracer.trace(\n name=\"graphql.resolve\",\n resource=info.field_name,\n span_type=SpanTypes.GRAPHQL,\n ) as span:\n span.set_tag_str(COMPONENT, config.graphql.integration_name)\n\n return next_middleware(root, info, **args)", "def dbtrace(*args, filter: Union[AnyStr, bool]=\"\", info: bool=True, keyword: Union[AnyStr,\n List[AnyStr], bool]=\"\", mark: bool=True, output: Union[AnyStr, bool]=\"\", timed:\n bool=True, title: AnyStr=\"\", verbose: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass", "def trace(func, api_name=''):\n if hasattr(func, 'api'):\n api_name = func.api\n def trace_func(self, *args, **kwargs):\n log.debug('%s: %s' % (api_name, args))\n return func(self, *args, **kwargs)\n trace_func.api = api_name\n return trace_func", "def query_graphql(raw_query, endpoint):\n query = \" \".join(shlex.split(raw_query, posix=False))\n r = requests.get(endpoint, params={\"query\": query})\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 400:\n response = r.json()\n assert \"errors\" in response\n raise GraphQLError(\"\".join([e[\"message\"] for e in response[\"errors\"]]))\n else:\n raise requests.exceptions.RequestException(\n f\"HTTP Status: {r.status_code}, Response Body: {r.text}\"\n )", "def settrace(function): # real signature unknown; restored from __doc__\n pass", "def trace(config: Optional[Config] = None) -> ContextManager[None]:\n if config is None:\n config = get_default_config()\n return trace_calls(\n logger=config.trace_logger(),\n code_filter=config.code_filter(),\n sample_rate=config.sample_rate(),\n max_typed_dict_size=config.max_typed_dict_size(),\n )", "def set_trace(stop=True, **kwargs):\n Qdb(**kwargs).set_trace(sys._getframe().f_back, stop=stop)\n # We use f_back so that we start in the caller of this function.", "def register_sql_proceedures(self):", "def get_graph(self, engine, args):\n raise NotImplementedError(\"Override in subclass\")" ]
[ "0.6343182", "0.56782556", "0.5526918", "0.5416044", "0.5391245", "0.52725345", "0.52248937", "0.49928382", "0.49856138", "0.48868668", "0.4881803", "0.4879053", "0.48706585", "0.4842129", "0.48220646", "0.47949135", "0.47835508", "0.4759621", "0.47522473", "0.4750627", "0.4726433", "0.47222862", "0.46826136", "0.46486133", "0.46461233", "0.46448192", "0.46420252", "0.4639513", "0.4629322", "0.4625359" ]
0.6942677
0
Wrapper for graphql.graphql function.
def _traced_graphql(func, _, args, kwargs): schema = args[0] # get the query as a string if len(args) > 1: request_string = args[1] else: request_string = kwargs.get('request_string') if isinstance(request_string, Document): query = request_string.loc.source.body else: query = request_string # allow schemas their own tracer with fall-back to the global tracer = getattr(schema, 'datadog_tracer', ddtrace.tracer) if not tracer.enabled: return func(*args, **kwargs) with tracer.trace( RES, span_type=TYPE, service=SERVICE, resource=_resolve_query_res(query) ) as span: span.set_tag(QUERY, query) result = None try: result = func(*args, **kwargs) return result finally: # `span.error` must be integer span.error = int(result is None or result.invalid) if result is not None: span.set_tag(ERRORS, result.errors) span.set_metric(INVALID, int(result.invalid))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_graphql(raw_query, endpoint):\n query = \" \".join(shlex.split(raw_query, posix=False))\n r = requests.get(endpoint, params={\"query\": query})\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 400:\n response = r.json()\n assert \"errors\" in response\n raise GraphQLError(\"\".join([e[\"message\"] for e in response[\"errors\"]]))\n else:\n raise requests.exceptions.RequestException(\n f\"HTTP Status: {r.status_code}, Response Body: {r.text}\"\n )", "def run_graphql(query: str, token: str):\n response = requests.post(\n 'https://api.github.com/graphql',\n json={'query': query},\n headers={'Authorization': 'Bearer ' + token})\n response.raise_for_status()\n return response.json()", "def patch():\n logger.debug('Patching `graphql.graphql` function.')\n wrapt.wrap_function_wrapper(graphql, 'graphql', _traced_graphql)", "def graphiql(request):\n del request\n graphiql_filepath = pathlib.Path(__file__).absolute().parent / \"graphiql.html\"\n with open(graphiql_filepath) as f:\n return django.http.response.HttpResponse(f.read())", "def make_query(graph, ns, request_schema, response_schema):\n @graph.route(\"/v1/foo/get\", Operation.Query, ns)\n @qs(request_schema)\n @response(response_schema)\n def foo_query():\n \"\"\"\n My doc string\n \"\"\"\n request_data = load_query_string_data(request_schema)\n response_data = dict(\n result=True,\n value=request_data[\"required_value\"],\n )\n return dump_response_data(response_schema, response_data, Operation.Query.value.default_code)", "def get_query():\n query = \"\"\"{\n repository(name: \"flux\", owner: \"fluxcd\") {\n forkCount\n issues {\n totalCount\n }\n pullRequests {\n totalCount\n }\n releases {\n totalCount\n }\n stargazers {\n totalCount\n }\n watchers {\n totalCount\n }\n }\n}\n \"\"\"\n return query", "def _send_gql_request(gateway_port):\n mutation = (\n f'mutation {{'\n + '''docs(data: {text: \"abcd\"}) { \n id \n } \n }\n '''\n )\n c = Client(host='localhost', port=gateway_port, protocol='http')\n return c.mutate(mutation=mutation)", "def execute_graphql_request(\n self, request, data, query, variables, operation_name, show_graphiql=False\n ):\n if not query:\n if show_graphiql:\n return None\n raise HttpError(HttpResponseBadRequest(\"Must provide query string.\"))\n\n try:\n backend = self.get_backend(request)\n document = backend.document_from_string(self.schema, query)\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n if request.method.lower() == \"get\":\n operation_type = document.get_operation_type(operation_name)\n if operation_type and operation_type != \"query\":\n if show_graphiql:\n return None\n\n raise HttpError(\n HttpResponseNotAllowed(\n [\"POST\"],\n \"Can only perform a {} operation from a POST request.\".format(\n operation_type\n ),\n )\n )\n\n # Check request weight\n try:\n if self.list_limit or self.weight_limit or self.depth_limit:\n if document:\n fragments = get_fragments(document.document_ast.definitions)\n definitions_total_weight = 0\n for definition in document.document_ast.definitions:\n if not isinstance(definition, OperationDefinition):\n continue\n\n if operation_name and definition.name != operation_name:\n continue\n\n def_weight = self.calculate_action_weight(\n definition.selection_set,\n fragments)\n definitions_total_weight += def_weight\n if self.weight_limit and definitions_total_weight > self.weight_limit:\n raise QueryWeightExceeded(\"Your query exceeds the maximum query weight allowed\")\n except Exception as e:\n return 
ExecutionResult(errors=[e], invalid=True)\n\n try:\n extra_options = {}\n if self.executor:\n # We only include it optionally since\n # executor is not a valid argument in all backends\n extra_options[\"executor\"] = self.executor\n\n return document.execute(\n root_value=self.get_root_value(request),\n variable_values=variables,\n operation_name=operation_name,\n context_value=self.get_context(request),\n middleware=self.get_middleware(request),\n **extra_options\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)", "def query(self, query: str, variables: Optional[Any] = None) -> Dict:\n resp = self.post(\"graphql\", json={\"query\": query, \"variables\": variables})\n if \"errors\" in resp:\n raise Exception(resp[\"errors\"][0][\"message\"])\n return resp[\"data\"]", "def run_graphql_query(\n self,\n query,\n headers,\n status_code=200):\n request = requests.post(PH_API_URL, data=json.dumps(query), headers=headers)\n if request.status_code == status_code:\n return request.json()\n else:\n raise Exception(\n \"Unexpected status code returned: {}\".format(\n request.status_code)\n )", "def graphql_query(self, end_cursor, user_id) -> tuple[dict, list, tuple[str, bool]]:\n query_params = {\n 'query_hash': '8c2a529969ee035a5063f2fc8602a0fd',\n 'variables': json.dumps({\"id\":user_id,\"first\":8,\"after\":end_cursor})\n }\n\n url_post = 'https://www.instagram.com/graphql/query/'\n\n _, res_post = self.request_safe(url_post, params_request_safe=query_params)\n \n try:\n res_json = res_post.json()\n except json.JSONDecodeError:\n soup = BeautifulSoup(res_post.text, 'html.parser')\n elm = soup.find('div', attrs={'class':'error-container'})\n if not elm is None and \"Error\" in elm.text:\n raise Exception(elm.text)\n if res_post.json().get('message') == 'rate limited':\n raise Exception(res_post.json().get('message'))\n \n has_next_page = res_post.json()['data']['user']['edge_owner_to_timeline_media']['page_info']['has_next_page']\n end_cursor = res_post.json()['data']['user']['edge_owner_to_timeline_media']['page_info']['end_cursor']\n edges = res_post.json()['data']['user']['edge_owner_to_timeline_media']['edges']\n return (res_post.json(), edges, (end_cursor, has_next_page))", "def make_query(self):", "def query(self, query):", "def execute_graphql_request(\n schema, # type: GraphQLSchema\n params, # type: RequestParams\n allow_only_query=False, # type: bool\n backend=None, # type: GraphQLBackend\n **kwargs # type: Any\n):\n if not params.query:\n raise HttpQueryError(400, \"Must provide query string.\")\n\n try:\n if not backend:\n backend = get_default_backend()\n document = backend.document_from_string(schema, params.query)\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n if allow_only_query:\n operation_type = document.get_operation_type(params.operation_name)\n if operation_type and operation_type != \"query\":\n raise HttpQueryError(\n 405,\n \"Can only perform a {} operation from a POST request.\".format(\n operation_type\n ),\n headers={\"Allow\": \"POST\"},\n )\n\n try:\n return document.execute(\n operation_name=params.operation_name, variables=params.variables, **kwargs\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)", "def query(self, query, authorization_required=True):\n url = 'https://{}/api/v1/graphql'.format(self.host)\n headers = {\n 'Content-Type': 'application/json',\n }\n json = {\n 'query': query,\n }\n # Login if not yet done\n if authorization_required:\n if not self.authorized:\n 
self.login()\n if self.token:\n headers['Authorization'] = 'Bearer {}'.format(self.token)\n\n request = self.session.post(\n url, headers=headers, json=json,\n verify=self.verify)\n return request", "def query(output, query):\n gqlapi = gql.get_api()\n print_output(output, gqlapi.query(query))", "def visit_query(self, query):\n return query", "def wrapGraph(self, query) :\n\t\tif self.graph :\n\t\t\treturn \" GRAPH <%s> { %s } \" % (self.graph, query)\n\t\telse :\n\t\t\treturn query", "def query(self, *, sparql: str) -> Result:\n pass", "def is_graphql_type(graphql_type):\n # Helper function to work around the fact that \"is_type\" is a poorly-named function.\n return is_type(graphql_type)", "def query(self, **kwargs):", "def query_mocker(key, json, body_match=\"query\"):\n payload={}\n if type(json) == list:\n json={'edges': [{'node': item} for item in json]}\n payload[key]=json\n return graphql_request_mocker(payload=payload, body_match=body_match)", "def execute_gql_query(\n self, gql_name: str, **kwargs\n ) -> Union[dict, list, int]:\n self._query_type = \"query\"\n data = self._exec(gql_name, kwargs)\n return data", "def execute_gql_mutation(\n self, gql_name: str, **kwargs\n ) -> Union[dict, list, int]:\n self._query_type = \"mutation\"\n data = self._exec(gql_name, kwargs)\n return data", "def extract_media_gql(data):\n user = data[\"owner\"]\n media_id = \"%s_%s\" % (data[\"id\"], user[\"id\"])\n if \"full_name\" in user:\n # for hashtag user contain {'id': '2041641294'}\n user = extract_user_short(user)\n else:\n user[\"pk\"] = user.pop(\"id\")\n location = data.get(\"location\")\n if location:\n location = {\"pk\": int(location.get(\"id\")), \"name\": location.get(\"name\")}\n media_type = {\"GraphImage\": 1, \"GraphVideo\": 2, \"GraphSidecar\": 8}[data[\"__typename\"]]\n product_type = data.get(\"product_type\", \"\")\n video_url = \"\"\n if media_type == 2:\n video_url = data[\"video_url\"]\n if not product_type:\n product_type = \"feed\"\n shortcode = ''\n if 'shortcode' in data:\n shortcode = data[\"shortcode\"]\n return {\n \"pk\": int(data[\"id\"]),\n \"taken_at\": int(data[\"taken_at_timestamp\"]),\n \"id\": media_id,\n \"media_type\": media_type,\n \"product_type\": product_type,\n \"code\": shortcode,\n \"thumbnail_url\": sorted(\n data.get(\"display_resources\", data.get('thumbnail_resources')), # display_resources - user feed, thumbnail_resources - hashtag feed\n key=lambda o: o[\"config_width\"] * o[\"config_height\"],\n ).pop()[\"src\"],\n \"location\": location,\n \"user\": user,\n \"comment_count\": json_value(data, \"edge_media_to_comment\", \"count\"),\n \"like_count\": json_value(data, \"edge_media_preview_like\", \"count\"),\n \"caption_text\": json_value(\n data, \"edge_media_to_caption\", \"edges\", 0, \"node\", \"text\", default=\"\"\n ),\n \"usertags\": [\n extract_usertag(usertag['node'])\n for usertag in data.get(\"edge_media_to_tagged_user\", {}).get(\"edges\", [])\n ],\n \"video_url\": video_url,\n \"view_count\": int(data.get('video_view_count') or 0),\n \"video_duration\": data.get('video_duration'),\n \"title\": data.get(\"title\") or None,\n \"resources\": [\n extract_resource_gql(edge['node'])\n for edge in data.get('edge_sidecar_to_children', {}).get('edges', [])\n ]\n }", "async def run_query(query):\n async with httpx.AsyncClient(timeout=None) as client:\n response = await client.post(\n BLAZEGRAPH_URL,\n headers=BLAZEGRAPH_HEADERS,\n data=query,\n )\n assert response.status_code < 300\n return response.json()['results']['bindings']", "def 
query():\n query = request.json.get('query')\n variables = request.json.get('variables') # Todo: add handling variables\n logger.debug('Query: %s', request.json)\n result = schema.execute(query)\n result_hash = format_result(result)\n return result_hash", "def _make_query(self):\r\n raise NotImplementedError()", "def graphql_request_mocker(payload=None, body_match=None):\n @functools.wraps(mock_graphql_request)\n def wrapper(mocker, status_code=None, error=None, attempts=None):\n return mock_graphql_request(mocker, payload=payload,\n body_match=body_match, error=error, status_code=status_code,\n attempts=attempts)\n return wrapper", "def NaturalQuery(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.66280913", "0.6354022", "0.632226", "0.6310396", "0.58947986", "0.5789386", "0.56969786", "0.5674856", "0.56201303", "0.5613056", "0.5543577", "0.5480393", "0.54736185", "0.5457626", "0.54279554", "0.53645164", "0.53170335", "0.53066283", "0.52594376", "0.5255184", "0.52508664", "0.52161986", "0.52112806", "0.519612", "0.51598674", "0.5158175", "0.5146439", "0.5142576", "0.51242197", "0.5087145" ]
0.68230045
0
Convert from a PGM file to a FITS file.
def convert_to_fits(image_file, clobber=True): img = load_image(image_file) fits.writeto(image_file.replace('pgm', 'fits'), img, clobber=clobber) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_as_fits(self, filename):", "def f2tif(path,is_gray=1): \n# import tifffile\n import tqdm\n print(\"==============================================\")\n print(\"Convert file to tif stack!\")\n pathout = path[:-4]+'_'+str(is_gray)+'.tif' \n video = mp.VideoFileClip(path)\n i=0\n for fr in tqdm.tqdm(video.iter_frames()):\n if is_gray == 1:\n fr= cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY) \n if i == 0:\n tifffile.imwrite(pathout,fr, append=False)\n else:\n tifffile.imwrite(pathout,fr, append=True)\n i += 1\n print(\"==============================================\")\n print(\"TIF convertion Done!\")\n print(\"nFrames=\"+str(i))\n video.reader.close()# To fix handel error problem", "def test_io_import_fmi_pgm_shape():\n root_path = pysteps.rcparams.data_sources[\"fmi\"][\"root_path\"]\n filename = os.path.join(root_path, \"20160928\",\n \"201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz\")\n R, _, _ = pysteps.io.import_fmi_pgm(filename, gzipped=True)\n assert R.shape == (1226, 760)", "def nx2sif(fn, g):\n with open(fn, 'w') as fh:\n for e in g.edges_iter():\n fh.write('%s pp %s\\n' % (e[0], e[1]))", "def read_single_filename(fn, scale=1.0, datatype=\"float\"):\n with open(fn) as f:\n data = np.array(read_pgm(f)).astype('int16')\n if datatype == \"float\":\n data = img_as_float(img_as_int(data))\n return rescale(data, scale)", "def ascii_to_tiff(infile, outfile, refIm):", "def read_pgm(pgm_file):\n\n # First line contains some image meta-info\n p5, width, height, depth = pgm_file.readline().split()\n\n # Ensure we're actually reading a P5 file\n assert p5 == b'P5'\n assert depth == b'65535', \"Only 16-bit PGM files are supported\"\n\n width, height = int(width), int(height)\n\n data = np.fromfile(pgm_file, dtype='<u2', count=width * height)\n\n return data.reshape(height, width).astype(np.uint32)", "def create_pgm_file(\r\n width: int, height: int, file_name: str, comment: str, img: list, greylevel=255\r\n) -> None:\r\n FILE = open(file_name, \"wb\")\r\n\r\n # Defining the PGM Headers\r\n pgm_header = f\"P2\\n#{comment}\\n{str(width)} {str(height)}\\n{str(greylevel)}\\n\"\r\n pgmHeader_byte = bytearray(pgm_header, \"utf-8\")\r\n\r\n # Writing the PGM Headers into the file\r\n FILE.write(pgmHeader_byte)\r\n\r\n # Creating the rows of the data\r\n for row in img:\r\n row = [str(x) for x in row]\r\n FILE.write(bytearray(\" \".join(row) + \"\\n\", \"utf-8\"))\r\n\r\n FILE.close()", "def asc_to_gtif(i_dir):\n\n # Set search for all files with suffix in specified folder\n q = join(i_dir, \"*.asc\")\n # List of all TIF files\n asc_fps = glob.glob(q)\n\n # Loop over all files\n for item in asc_fps:\n # Open ASC file\n data = np.loadtxt(item, delimiter=\";\")\n\n # Determine the size of the output array\n x_size = np.count_nonzero(data[:, 0] == data[0, 0])\n y_size = np.count_nonzero(data[:, 1] == data[0, 1])\n\n # Transform columns to grid\n arr = np.reshape(data[:, 2], (1, x_size, y_size), order=\"F\")\n arr = np.flip(arr, axis=1)\n\n # Determine pixel resolution\n arr_x = np.reshape(data[:, 0], (x_size, y_size), order=\"F\")\n pix_x = arr_x[0, 1] - arr_x[0, 0]\n arr_y = np.reshape(data[:, 1], (x_size, y_size), order=\"F\")\n pix_y = arr_y[1, 0] - arr_y[0, 0]\n\n # Determine top-left coordinates\n left = data[:, 0].min()\n top = data[:, 1].max() + pix_y # Adjust for pixel size\n\n # Set meta data for GeoTIF\n transform = from_origin(left, top, pix_x, pix_y)\n si_crs = {'init': 'EPSG:3794'} # D96/TM\n\n _, name = split(item[:-4])\n save_file = join(i_dir, name + '.tif')\n\n # Save array as 
with metadata as GeoTIFF\n new_dataset = rasterio.open(save_file, \"w\", driver=\"GTiff\",\n height=arr.shape[1], width=arr.shape[2],\n count=1, dtype=str(arr.dtype),\n crs=si_crs,\n transform=transform, compress=\"lzw\")\n new_dataset.write(arr)\n new_dataset.close()\n\n # Remove ASC file\n # remove(item)\n\n # Output message:\n out_msg = 'Successfully converted ASC files to GeoTIFF!'\n\n return out_msg", "def save2nifti(self, file_path):\n #Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 #signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 #128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n #Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n data = np.rot90(self._data, 3)\n if data_type.has_key(data.dtype.type):\n self._header['datatype'] = data_type[data.dtype.type]\n self._header['cal_max'] = data.max()\n self._header['cal_min'] = 0\n image = nib.nifti1.Nifti1Image(data, None, self._header)\n nib.nifti1.save(image, file_path)", "def save2nifti(self, file_path):\n # Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 # signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 # 128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n # Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n 
data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n header = nib.Nifti1Header()\n if self.data.shape[1] == 1:\n new_shape = (self.data.shape[0], 1, 1)\n else:\n new_shape = (self.data.shape[0], 1, 1, self.data.shape[1])\n data = self.data.reshape(new_shape)\n\n if data.dtype.type in data_type:\n header['datatype'] = data_type[data.dtype.type]\n header['cal_max'] = data.max()\n header['cal_min'] = data.min()\n image = nib.Nifti1Image(data, None, header)\n nib.nifti1.save(image, file_path)", "def matrice_vers_pgm(M,fichier):\n fic_out = open(fichier,\"w\")\n\n # Entête du fichier pgm\n n = len(M) # Nb de lignes\n p = len(M[0]) # Nb de colonnes\n fic_out.write(\"P2\\n\") # Image en niveaux de gris\n fic_out.write(str(p) + \" \" + str(n) + \"\\n\") # Nb de colonnes et de lignes\n fic_out.write(\"255\\n\") # Niveaux de gris \n for ligne in M:\n for x in ligne:\n fic_out.write('{:4d}'.format(x))\n fic_out.write('\\n')\n\n fic_out.close()\n return", "def fits_to_png(fn_in, fn_out=None, vmin=None, vmax=None, scaling='arcsinh'):\n\n\t# setting fn_out\n\textension = '.png'\n\n\tif fn_out is None:\n\t\tbase_in, ext_in = os.path.splitext(fn_in)\n\n\t\tif ext_in == '.fits': \n\t\t\tfn_out = base_in+extension\n\t\telse: \n\t\t\tfn_out = fn_in+extension\n\n\tif not os.path.isfile(fn_in):\n\t\tprint(\"skipping \"+fn_in+\" as in file does not exist\")\n\telse:\n\t\t# read in\n\t\timg = fits.getdata(fn_in)\n\n\t\timg_scaled = scale_img(img, vmin=vmin, vmax=vmax, scaling=scaling)\n\n\t\tsi.imsave(fn_out, img_scaled)", "def read_pfm(filename):\n\n\twith open(filename, \"r\") as handle:\n\t\tmotif = motifs.read(handle, \"pfm\")\n\tmotif.pseudocounts = .25\n\tmotif.background = {'A':0.3,'C':0.2,'G':0.2,'T':0.3}\n\n\treturn motif", "def load_pfm(filename):\n filename = process(filename)\n with open(filename, \"r\", encoding=\"ISO-8859-1\") as file:\n nc = 3 if file.readline().rstrip() == \"PF\" else 1\n width, height = [int(x) for x in file.readline().rstrip().split()]\n shape = (height, width, nc)\n img = np.fromfile(file, '{0}{1}'.format(\"<\" if float(file.readline().rstrip()) < 0 else \">\",'f') )\n img = np.reshape(img, shape)\n return np.flip(np.flip(img, 2), 0).copy()", "def import_fits(self, file_path, pixscale = 7.77/43):\n hdulist = fits.open(file_path, memmap=True)\n data = hdulist[0].data\n \n shape = data.shape\n \n ## Create Image objects\n if len(shape) == 2:\n return cls(data,pixscale)\n elif len(shape) == 3:\n image_list = []\n \n ## Iterate over data cube and intianlize Image objects\n for i in range(data.shape[0]):\n single_image_data = data[i,:,:]\n image_list.append(cls(single_image_data,pixscale))\n return image_list\n else:\n print shape\n sys.exit(\"FITs Read Error: Must be 2-D or 3-D Image datacube\")\n \n def export_fits(self, mask=None, **kwargs):\n \"\"\"Export Image as a NumPy array to a FITS file\"\"\"\n \n ## Check key word arguments\n save_file = kwargs.pop('save_file', 'image.fits')\n fill_value = kwargs.pop('fill_value',0.)\n \n ## Check if mask provided matches data shape\n if self.is_valid_mask(mask):\n masked_data = np.ma.MasedArray()", "def write_file(self, i, path, fout):\n\n test_file = path + '/' + self.output[i]\n # Write file name\n print(test_file, file=fout, end='\\n\\n')\n\n extension = os.path.splitext(test_file)[1]\n if extension == '.fits' or extension == 'FITS':\n import subprocess\n prog = self.bindir + '/fits2ascii.py -i ' + test_file\n output = 
subprocess.check_output(prog.split(), shell=False)\n data = output.decode()\n else:\n fin = open(test_file, 'r')\n data = fin.read()\n fin.close()\n #fout.write(data)\n print(data, file=fout)\n print(file=fout, end='\\n')", "def write_to_fits(self, filename=None, gain=GAIN):\n\n hdu_new = self.hdu_ideal\n hdu_new[1].data = (self.data/gain).astype('uint16') # Convert to ADU in 16 bit integers.\n\n if filename is None:\n filename = self.ima_path[:-5] + self.modif_str + '.fits'\n hdu_new.writeto(filename, overwrite=True)\n\n print('Writing to file: ' + filename)", "def uncompress(filein, pathout=None, overwrite=True):\n filein = pathlib.Path(filein)\n if filein.suffix != '.fz':\n return\n if pathout is not None:\n pathout = filein.parent\n fileout = (pathout or filein.parent) / filein.stem\n if fileout.exists() and not overwrite:\n raise RuntimeError('Output exists and overwrite is False: {0}'.format(fileout))\n with fitsio.FITS(str(filein), mode='r') as IN:\n with fitsio.FITS(str(fileout), mode='rw', clobber=overwrite) as OUT:\n for hdu in IN:\n header = hdu.read_header()\n data = hdu.read()\n OUT.write(data, header=header, extname=hdu.get_extname())\n return str(fileout)", "def genPSFimage(filename=None):\n hdu=pf.open(filename)\n nn = len(hdu)\n for i in range(1,nn):\n img = hdu[i].data[0][4:].reshape(npix,npix)\n img = img/img.sum()\n hdu[i].data = img\n #hdu.scale('int16', '', bzero=32768)\n newfilename = filename[:-7]+'_stamp.fits'\n hdu.writeto(newfilename)\n os.system('gzip '+newfilename)", "def load_fermi_image():\n\n path = get_path('fermi_counts.fits.gz', location='local')\n hdu = fits.open(path)[1]\n\n return hdu", "def read_pgm(filename, byteorder='>'):\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n return np.frombuffer(buffer,dtype='u1' if int(maxval) < 256 else byteorder+'u2',count=int(width)*int(height),offset=len(header)).reshape((int(height), int(width)))", "def make_fifi_file(self, tmpdir):\n ffile = self.make_file(tmpdir, 'test_fifi.fits')\n fits.setval(ffile, 'INSTRUME', value='FIFI-LS')\n return ffile", "def write_pfm(filename, img, scale=1):\n if img.dtype.name != 'float32':\n raise TypeError('Image dtype must be float32.')\n\n with open(filename, 'w') as file:\n file.write('PF\\n' if img.shape[2] == 3 else 'Pf\\n')\n file.write('{w} {h}\\n'.format(w=img.shape[1], h=img.shape[0]))\n\n endian = img.dtype.byteorder\n\n if endian == '<' or endian == '=' and sys.byteorder == 'little':\n scale = -scale\n\n file.write('%f\\n' % scale)\n img = np.flip(np.flip(img, 2), 0)\n img.tofile(file)", "def convert_tile(fname, out_fname, compression, filter_opts):\n with h5py.File(out_fname, 'w') as fid:\n with rasterio.open(fname) as ds:\n # global attributes\n attach_attributes(fid, ds.tags())\n\n # find and convert every subsdataset (sds)\n for sds_name in ds.subdatasets:\n with rasterio.open(sds_name) as sds:\n ds_name = Path(sds_name.replace(':', '/')).name\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n f_opts = dict()\n else:\n f_opts = filter_opts.copy()\n\n # use sds native chunks if none are provided\n if 'chunks' not in f_opts:\n f_opts['chunks'] = list(sds.block_shapes[0])\n\n # modify to have 3D chunks if we have a multiband sds\n 
if sds.count == 3:\n # something could go wrong if a user supplies\n # a 3D chunk eg (2, 256, 340)\n f_opts['chunks'].insert(0, 1)\n f_opts['chunks'] = tuple(f_opts['chunks'])\n else:\n f_opts['chunks'] = tuple(f_opts['chunks'])\n\n # subdataset attributes and spatial attributes\n attrs = sds.tags()\n attrs['geotransform'] = sds.transform.to_gdal()\n attrs['crs_wkt'] = sds.crs.wkt\n\n # ensure single band sds is read a 2D not 3D\n data = sds.read() if sds.count == 3 else sds.read(1)\n\n # write to disk as an IMAGE Class Dataset\n write_h5_image(data, ds_name, fid, attrs=attrs,\n compression=compression,\n filter_opts=f_opts)", "def read_pypeit_fits_new(self, filename, unit='f_lam', exten=1):\n\n # Open the fits file\n try:\n hdu = fits.open(filename)\n except:\n raise ValueError(\"Filename not found\", str(filename))\n\n self.header = hdu[0].header\n self.unit = unit\n\n # Check pypeit header keywords\n #\n # dispersion\n if 'OPT_WAVE' in hdu[exten].columns.names:\n self.dispersion = hdu[exten].data['OPT_WAVE']\n if 'wave' in hdu[exten].columns.names:\n self.dispersion = hdu[exten].data['wave']\n # flux density\n if 'OPT_FLAM' in hdu[exten].columns.names:\n self.flux = hdu[exten].data['OPT_FLAM']* 1e-17\n if 'flux' in hdu[exten].columns.names:\n self.flux = hdu[exten].data['flux']* 1e-17\n\n # mask\n if 'OPT_MASK' in hdu[exten].columns.names:\n self.mask = np.array(hdu[exten].data['OPT_MASK'], dtype=bool)\n if 'mask' in hdu[exten].columns.names:\n self.mask = np.array(hdu[exten].data['mask'], dtype=bool)\n\n # ivar\n if 'OPT_FLAM_IVAR' in hdu[exten].columns.names:\n self.flux_ivar = hdu[exten].data['OPT_FLAM_IVAR']\n if 'ivar' in hdu[exten].columns.names:\n self.flux_ivar = hdu[exten].data['ivar']\n if 'sigma' not in hdu[exten].columns.names:\n # No flux density 1 sigma error stored in this format\n # Calculate the 1 sigma error.\n self.get_fluxden_error_from_ivar()\n # 1 sigma flux density error\n if 'OPT_FLAM_SIG' in hdu[exten].columns.names:\n self.flux_err = hdu[exten].data['OPT_FLAM_SIG'] * 1e-17\n\n\n\n # Mask all pixels where the flux error is 0\n new_mask = np.ones_like(self.mask, dtype=bool)\n new_mask[self.flux_err == 0] = 0\n self.mask = new_mask\n\n # self.dispersion_unit = 1. 
* u.AA\n # self.fluxden_unit = 1e-17*u.erg/u.s/u.cm**2/u.AA\n\n if 'TELLURIC' in hdu[exten].columns.names:\n self.telluric = hdu[exten].data['TELLURIC']\n if 'OBJ_MODEL' in hdu[exten].columns.names:\n self.obj_model = hdu[exten].data['OBJ_MODEL']* 1e-17", "def read_pgm(filename, byteorder='>'):\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n return np.frombuffer(buffer,\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n count=int(width)*int(height),\n offset=len(header)\n ).reshape((int(height), int(width)))", "def read_pgm(filename, byteorder='>'):\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n return numpy.frombuffer(buffer,\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n count=int(width)*int(height),\n offset=58\n ).reshape((int(height), int(width)))", "def read_pgm(filename, byteorder='>'):\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n return np.frombuffer(buffer,\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n count=int(width)*int(height),\n offset=len(header)\n ).reshape((int(height), int(width)))", "def tofits(outfilename, pixelarray, hdr=None, verbose=True):\n # print \"LOGX:: Entering `tofits` method/function in %(__file__)s\" %\n # globals()\n pixelarrayshape = pixelarray.shape\n if verbose:\n print(\"FITS export shape : (%i, %i)\" % (pixelarrayshape[0], pixelarrayshape[1]))\n\n if pixelarray.dtype.name == \"bool\":\n pixelarray = np.cast[\"uint8\"](pixelarray)\n\n if os.path.isfile(outfilename):\n os.remove(outfilename)\n\n if hdr == None: # then a minimal header will be created\n hdu = pyfits.PrimaryHDU(pixelarray.transpose())\n else: # this if else is probably not needed but anyway ...\n hdu = pyfits.PrimaryHDU(pixelarray.transpose(), hdr)\n\n hdu.writeto(outfilename, output_verify='ignore')\n\n if verbose:\n print(\"Wrote %s\" % outfilename)" ]
[ "0.5638094", "0.5415192", "0.53508157", "0.5323675", "0.5319744", "0.52432823", "0.52354145", "0.52249837", "0.5175307", "0.517244", "0.5168601", "0.51595557", "0.5145556", "0.5114861", "0.5103376", "0.50699925", "0.5058637", "0.50480443", "0.50276166", "0.50008374", "0.49999297", "0.4965367", "0.49560982", "0.49470767", "0.49437162", "0.49428222", "0.49269065", "0.49265474", "0.49204412", "0.49099573" ]
0.65275276
0
Calculate the FWHM on a single bright star (either open or closed loops). image_file: either FITS or PGM. fwhm_init: (def=2) pixels for FWHM initial guess
def calc_fwhm_on_bright_star(image_file, print=True, fwhm_init=2.0): img = load_image(image_file) # Calculate the bacgkround bkg = photutils.Background(img, img.shape, filter_shape=(1,1), method='median') threshold = bkg.background + (30.0 * bkg.background_rms) sigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2. pixels kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) kernel.normalize() segm = detect_sources(img, threshold, npixels=5, filter_kernel=kernel) props = source_properties(img, segm) tbl = properties_table(props) # Check for junk stars (cosmic rays) idx = np.where((tbl['semimajor_axis_sigma'] > 1) & (tbl['semiminor_axis_sigma'] > 1))[0] tbl = tbl[idx] tbl['image_name'] = image_file if print == True: reformat_source_table(tbl) print_source_table(tbl) return tbl
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_psf_fwhm_inpix_moffat(arr):\n\tmodel = fit_moffat(arr)\n\n\tfwhm = 2.* model.gamma * np.sqrt( 2.**(1./model.alpha) - 1. )\n\n\treturn fwhm", "def estimateFWHM(imgID, side='blue'):\r\n\r\n iraf.unlearn('imexam')\r\n iraf.rimexam.fittype = \"gaussian\"\r\n iraf.delete('trace.xy', verify=\"no\")\r\n iraf.delete('fwhm.log', verify=\"no\")\r\n # extract the position of the trace\r\n f = open('database/ap%s%04d' % (side, imgID), 'r')\r\n dat = f.read()\r\n xy = dat.split('\\n')[5].split()[1:3]\r\n f.close()\r\n f = open('trace.xy', 'w')\r\n f.write('%s %s\\n' % (xy[0], xy[1]))\r\n f.close()\r\n # run imexam\r\n if side == 'blue':\r\n defkey = 'j'\r\n else:\r\n defkey = 'k'\r\n iraf.imexam('%s%04d' % (side, imgID), '1', logfile='fwhm.log', keeplog=\"yes\", defkey=defkey, imagecur='trace.xy', use_display=\"no\", autoredraw=\"no\")\r\n # load values\r\n f = open('fwhm.log', 'r')\r\n dat = f.read()\r\n fwhm = float(dat.split('\\n')[1].split('=')[4].split()[0])\r\n f.close()\r\n # cleanup\r\n os.unlink(\"fwhm.log\")\r\n os.unlink(\"trace.xy\")\r\n\r\n # update the header\r\n f = pyfits.open('%s%04d.spec.fits' % (side, imgID))\r\n #f[0].header.update('FWHM', np.round(fwhm, 2), 'FWHM estimate of the trace [pix]')\r\n f[0].header['FWHM']=np.round(fwhm, 2) #, 'FWHM estimate of the trace [pix]')\r\n f.writeto('%s%04d.spec.fits' % (side, imgID), clobber=True)\r\n f.close()\r\n if os.access('%s%04d_flux.spec.fits' % (side, imgID), os.F_OK):\r\n f = pyfits.open('%s%04d_flux.spec.fits' % (side, imgID))\r\n #f[0].header.update('FWHM', np.round(fwhm, 2), 'FWHM estimate of the trace [pix]')\r\n f[0].header['FWHM']= np.round(fwhm, 2)\r\n f.writeto('%s%04d_flux.spec.fits' % (side, imgID), clobber=True)\r\n f.close()", "def measure_fwhm(image, plot=True, printout=True):\n\n # Find FWHM\n # ----------\n\n fitted_line = fit_gaussian2d(image)\n\n # Find fitted center\n x_mean, y_mean = [i.value for i in [fitted_line.x_mean, fitted_line.y_mean]]\n\n # Estimate FWHM using gaussian_sigma_to_fwhm\n x_fwhm = fitted_line.x_stddev * gaussian_sigma_to_fwhm\n y_fwhm = fitted_line.y_stddev * gaussian_sigma_to_fwhm\n\n # Find half max\n hm = fitted_line(x_mean, y_mean) / 2.\n\n # Find the mean of the x and y direction\n mean_fwhm = np.mean([x_fwhm, y_fwhm])\n mean_fwhm = int(np.round(mean_fwhm))\n\n # Print info about fit and FWHM\n # ------------------------------\n\n if printout:\n print(\"Image Max: {}\".format(image.max()))\n print(\"Amplitude: {}\".format(fitted_line.amplitude.value))\n print(\"Center: ({}, {})\".format(x_mean, y_mean))\n print(\"Sigma = ({}, {})\".format(fitted_line.x_stddev.value,\n fitted_line.y_stddev.value, ))\n print(\"Mean FWHM: {} Pix \".format(mean_fwhm))\n print(\"FWHM: (x={}, y={}) Pix \".format(x_fwhm, y_fwhm))\n\n if plot:\n\n fig, [ax0, ax1, ax2, ax3] = plot_fit(image, fitted_line)\n\n # Make x and y grid to plot to\n y_arange, x_arange = np.mgrid[:image.shape[0], :image.shape[1]]\n\n # Plot input image with FWHM and center\n # -------------------------------------\n\n ax0.imshow(image, cmap='gray_r')\n\n ax0.axvline(x_mean - x_fwhm / 2, c='c', linestyle=\"--\", label=\"X FWHM\")\n ax0.axvline(x_mean + x_fwhm / 2, c='c', linestyle=\"--\")\n\n ax0.axhline(y_mean - y_fwhm / 2, c='g', linestyle=\"--\", label=\"Y FWHM\")\n ax0.axhline(y_mean + y_fwhm / 2, c='g', linestyle=\"--\")\n\n ax0.set_title(\"Center and FWHM Plot\")\n ax0.legend()\n\n # Plot X fit\n # ----------\n\n ax2.axvline(x_mean, linestyle=\"-\", label=\"Center\")\n ax2.axvline(x_mean - x_fwhm / 2, c='c', linestyle=\"--\", 
label=\"X FWHM\")\n ax2.axvline(x_mean + x_fwhm / 2, c='c', linestyle=\"--\")\n ax2.axhline(hm, c=\"black\", linestyle=\"--\", label=\"Half Max\")\n\n ax2.legend()\n\n # Plot Y fit\n # ----------\n\n ax3.axvline(y_mean, linestyle=\"-\", label=\"Center\")\n ax3.axvline(y_mean - y_fwhm / 2, c='g', linestyle=\"--\", label=\"Y FWHM\")\n ax3.axvline(y_mean + y_fwhm / 2, c='g', linestyle=\"--\")\n ax3.axhline(hm, c=\"black\", linestyle=\"--\", label=\"Half Max\")\n\n ax3.legend()\n\n plt.show()\n\n return np.array([x_fwhm, y_fwhm])", "def g2dfwhm(img):\n npix = img.shape[0]\n rowCen,colCen = adaptiveCentroid(img,1.1/scale)\n row,col = np.mgrid[0:npix,0:npix]\n row = row - rowCen\n col = col - colCen\n A0,sigmac0 = moments(img)\n sigmar0 = sigmac0\n rho0 = 0.\n B0 = 0.\n p0=np.array([sigmac0,sigmar0,rho0,A0, B0])\n def residualg2d(p,x,y,xc,yc,I):\n sigmax,sigmay,rho,A,B = p\n Ierr = np.sqrt(abs(I))+0.00001 # to avoid those = 0, add a small number \n res = (gaussian2d(x,y,xc,yc,sigmax,sigmay,rho,A,B) - I)/Ierr\n return res.flatten()\n p = leastsq(residualg2d,p0,args=(col,row,colCen,rowCen,img))[0]\n sigmac,sigmar,rho,A,B = p\n Mcc = sigmac**2\n Mrr = sigmar**2\n Mrc = rho**2*Mcc*Mrr\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n whiskerLength = np.sqrt(np.abs(M22))\n lambdap = 0.5*(M20 + abs(M22))\n lambdam = 0.5*(M20 - abs(M22))\n fwhm_g2d = np.sqrt(2.*np.log(2.))*(np.sqrt(lambdap)+np.sqrt(lambdam))\n #fwhm = np.sqrt(M20/2.)*2.35482*scale\n return A, B, whiskerLength, fwhm_g2d", "def calc_psf_fwhm_inpix_gaussian(arr):\n\tmodel = fit_gaussian(arr)\n\n\tsigma = max(model.y_stddev, model.x_stddev)\n\tfwhm = 2.355 * sigma\n\n\treturn fwhm", "def calc_psf_fwhm(arr, mode='moffat'):\n\n\tif mode == 'moffat':\n\t\treturn calc_psf_fwhm_inpix_moffat(arr)\n\telif mode == 'gaussian':\n\t\treturn calc_psf_fwhm_inpix_gaussian(arr)\n\telse:\n\t\traise ValueError(\"mode not recognized\")", "def fwhm(self):\n if not self.has_fwhm():\n if self.has_sepobjects():\n fwhm_pxl = self.sepobjects.get_fwhm_pxl(isolated_only=True,\n stars_only=True)\n self.set_fwhm(fwhm_pxl/self.units_to_pixels(\"arcsec\").value*\\\n units.arcsec)\n else:\n raise AttributeError(\"'fwhm' is not defined and no sepobjects loaded.\")\n return self._derived_properties[\"fwhm\"]", "def fwhm(x, y, data, criteria='last'):\n # native calculation is a radius, \"HWHM\", *2 is FWHM\n return estimate_size(x=x, y=y, data=data, metric='fwhm', criteria=criteria) * 2", "def fwhm(self, criteria='last'):\n return fwhm(self.x, self.y, self.data, criteria=criteria)", "def fwhm(self):\n model_dict = dict(zip(self.model().param_names, self.model().parameters))\n if self.model_type == self._MOFFAT2D:\n gamma, alpha = [model_dict[ii] for ii in (\"gamma_0\", \"alpha_0\")]\n FWHM = 2.0 * gamma * np.sqrt(2 ** (1 / alpha) - 1)\n FWHM_x, FWHM_y = None, None\n elif self.model_type == self._GAUSSIAN2D:\n sigma_x, sigma_y = [model_dict[ii] for ii in (\"x_stddev_0\", \"y_stddev_0\")]\n FWHM = 2.3548 * np.mean([sigma_x, sigma_y])\n FWHM_x, FWHM_y = 2.3548 * sigma_x, 2.3548 * sigma_y\n return FWHM, FWHM_x, FWHM_y", "def wfwhm(img,sigma):\n nrow,ncol=img.shape\n Isum = img.sum()\n Icol = img.sum(axis=0) # sum over all rows\n Irow = img.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = img*wrmat\n IWcol = 
IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Cm = np.matrix([[Mcc,Mrc],[Mrc,Mrr]])\n Cw = np.matrix([[sigma**2,0.],[0.,sigma**2]])\n Cimg = (Cm.I - Cw.I).I\n Mcc = Cimg[0,0]\n Mrr = Cimg[1,1]\n Mrc = Cimg[0,1]\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n e1 = M22.real/M20.real\n e2 = M22.imag/M20.real\n whiskerLength = np.sqrt(np.abs(M22))\n lambdap = 0.5*(M20 + abs(M22))\n lambdam = 0.5*(M20 - abs(M22))\n fwhmw = np.sqrt(2.*np.log(2.))*(np.sqrt(lambdap)+np.sqrt(lambdam))\n return e1,e2,whiskerLength,fwhmw", "def fwhm(self):\n return self._get_mean_and_samples_attribute('fwhm')", "def test_fwhm(self):\n m = self.sp.model\n bp = SpectralElement(\n Gaussian1D, mean=m.mean, amplitude=m.amplitude, stddev=m.stddev)\n assert_quantity_allclose(bp.fwhm(), 100 * u.AA, rtol=1e-3) # 0.1%", "def fwhm(self) -> float:\n return 2 * np.sqrt(2 * np.log(2)) * self.width", "def moffat_convolution(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. *math.sqrt(2.**(1./beta)-1.))\n\t\n im_kernel_array = gauss_kernel(n_fwhm,beta,r_s)\n conv_image = signal.convolve(im_array,im_kernel_array,mode = 'same')\n\n return (conv_image)", "def moffat_convolution_fft(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. *math.sqrt(2.**(1./beta)-1.))\n\n im_kernel_array = moffat_kernel(n_fwhm,beta,r_s)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)", "def psfphot(image, clobber=globclob, verbose=globver, pixtol=3.0,\n maxnpsf=5, interact=yes):\n\n # Defaults / constants\n psfmult=5.0 #standard factor (multiplied by fwhm to get psfradius)\n psfmultsmall=3.0 #similar to psfmult, adjusted for nstar and substar\n\n # Necessary package\n iraf.imutil()\n\n # Detect stars\n iqpkg.iqobjs(image, 3.0, 50000.0, wtimage=\"\", skyval=\"!MEDSKY\")\n\n root = image[:-5]\n [gain, rnoise, fwhm] = get_head(image, [\"GAIN\", \"READNOI\", \"SEEPIX\"])\n fwhm = float(fwhm); rnoise = float(rnoise)\n\n iraf.iterstat(image)\n \n # Saturation level\n if not check_head(image, \"SATURATE\"):\n saturate = 60000.0\n else:\n saturate = get_head(image, \"SATURATE\")\n \t \n # Update datapars and daopars\n iraf.datapars.fwhmpsf=fwhm\n iraf.datapars.sigma=iraf.iterstat.sigma\n iraf.datapars.datamin=iraf.iterstat.median-10*iraf.iterstat.sigma\n iraf.datapars.datamax=70000.0\n iraf.datapars.readnoise=rnoise\n iraf.datapars.epadu=gain \n iraf.daopars.psfrad=psfmult*fwhm\n iraf.daopars.fitrad=fwhm\n iraf.daopars.function=\"gauss,moffat15,moffat25,lorentz,penny1\"\n\n # coo file\n stars = Starlist(\"%s.stars\" % image)\n outf = open(\"%s.coo.1\" % image[:-5], \"w\")\n for star in stars:\n outf.write(\"%10.3f%10.3f\\n\" % (star.xval, star.yval))\n outf.close()\n\n #initial photometry\n iraf.daophot.phot(root,'default','default',aperture=fwhm,verify=no,\n verbose=verbose)\n\n iraf.datapars.datamax=30000.0\n iraf.pstselect(root,'default','default',maxnpsf,interactive=yes,\n verify=no,verbose=verbose)\n\n iraf.psf(root,'default','default','default','default','default',\n interactive=interact,verify=no,verbose=verbose)\n\n 
iraf.allstar(root,'default','default','default','default','default',\n verify=no,verbose=verbose)\n\n iraf.iterstat(\"%s.sub.fits\" % root)\n\n iraf.datapars.sigma=iraf.iterstat.sigma\n iraf.datapars.datamin=iraf.iterstat.median-10*iraf.iterstat.sigma\n\n iraf.datapars.datamax=70000.0\n iraf.daophot.phot(\"%s.sub.fits\" % root, \"SN.coo\", 'default', 'default',\n aperture=fwhm, verify=no, verbose=verbose)\n\n iraf.datapars.datamax=30000.0\n iraf.daopars.fitrad=fwhm*2.0\n iraf.allstar(\"%s.sub.fits\" % root, 'default', \"%s.psf.1.fits\" % root, \n 'default', 'default', 'default', verify=no, verbose=no)", "def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / diameter\n return fwhm", "def hpf(im, goal, window, j=0):\r\n\r\n # Fourier Transform\r\n F_im = dip.fft2(im)\r\n h, w = im.shape\r\n preset = False\r\n\r\n # Was there scope provided\r\n if j != 0:\r\n scope = np.array([j])\r\n preset = True\r\n else:\r\n scope = range(0, h)\r\n\r\n # Searching for the appropriate cutoff frequency\r\n for i in scope:\r\n freq_square = i\r\n\r\n # Error Check\r\n q = int(freq_square / 2)\r\n if q > w: # Error code\r\n print(\"Error! The filter width is larger than the transform!\")\r\n\r\n # Take a 1/4 square from each quadrant\r\n F_im[0:q, 0:q] = 0 # top left\r\n F_im[0:q, w - q:w] = 0 # top right\r\n F_im[h - q:h, 0:q] = 0 # bottom left\r\n F_im[h - q:h, w - q:w] = 0 # bottom right\r\n\r\n # Take real part only\r\n im_new = np.abs(dip.ifft2(F_im))\r\n\r\n # Loop if target frequency isn't provided\r\n if preset == False:\r\n if (np.mean(im_new) - goal) < window:\r\n return im_new, i\r\n else:\r\n return im_new, i", "def simulate_star(fwhm, mag, integ, bgd=None, roff=0, coff=0):\n\n img_size = 8\n img_size2 = img_size * img_size\n\n if not isinstance(bgd, (int, float)):\n raise TypeError(\"simulate_star:: bgd expected to be (int, float)\")\n\n star = np.zeros((img_size, img_size))\n\n # Mag to counts conversion\n gain = 5. # e-/ADU\n counts = integ * transform.mag_to_count_rate(mag) / gain\n\n # Gaussian model\n halfsize = np.int(img_size / 2)\n row, col = np.mgrid[-halfsize:halfsize, -halfsize:halfsize] + 0.5\n sigma = fwhm / (2. * np.sqrt(2. * np.log(2.)))\n g = np.exp(-((row - roff)**2 / sigma**2 + (col - coff)**2 / sigma**2) / 2.)\n\n # Zero 6x6 corners\n g = cntr.zero_6x6_corners(g, centered=True)\n\n # Normalize to counts\n i1 = np.int(halfsize + 0.5 - 3)\n i2 = np.int(halfsize + 0.5 + 3)\n g = counts * g / g[i1:i2][i1:i2].sum()\n\n # Simulate star\n star = np.random.normal(g)\n\n # Add background\n if np.shape(bgd) == ():\n bgd = np.ones((img_size, img_size)) * bgd\n\n star = star + bgd\n\n return np.rint(star)", "def joint_bilateral(filename,flash_image,noflash_image,sigma_spatial,sigma_intensity):\n\t# make a simple Gaussian function taking the squared radius\n\tgaussian = lambda r2, sigma: np.exp(-0.5*r2/sigma**2)\n\tflash_image = cv2.cvtColor(flash_image,cv2.COLOR_BGR2RGB)\n\tnoflash_image = cv2.cvtColor(noflash_image,cv2.COLOR_BGR2RGB)\n\n\t# define the window width to be the 2 time the spatial std. dev. 
to\n\t# be sure that most of the spatial kernel is actually captured\n\twin_width = int(3*sigma_spatial +1)\n\twgt_sum = np.zeros_like(flash_image).astype(np.float64)\n\tresult = np.zeros_like(flash_image).astype(np.float64)\n\tout= np.zeros_like(flash_image).astype(np.float64)\n\t\n\t\n\tfor i in tqdm(range(flash_image.shape[-1]),desc=\"Going through color channels\"):\n\t\tnorm_flash_image = normalize(flash_image[:,:,i])\n\t\tnorm_noflash_image = normalize(noflash_image[:,:,i])\n\t\tfor shft_x in range(-win_width,win_width+1):\n\t\t\tfor shft_y in range(-win_width,win_width+1):\n\t\t\t\t# compute the spatial contribution\n\t\t\t\tspatial = gaussian(shft_x**2+shft_y**2, sigma_spatial )\n\t\n\t\t\t\t# shift by the offsets to get image window\n\t\t\t\twindow = np.roll(norm_flash_image, [shft_y, shft_x], axis=[0,1])\n\t\t\t\twindow1 = np.roll(norm_noflash_image, [shft_y, shft_x], axis=[0,1])\n\t\t\t\t# compute the intensity contribution\n\t\t\t\tcombined_filter = spatial*gaussian((window-norm_flash_image)**2, sigma_intensity )\n\t\n\t\t\t\t# result stores the mult. between combined filter and image window\n\t\t\t\tresult[:,:,i] += window1*combined_filter\n\t\t\t\twgt_sum[:,:,i] += combined_filter\n\tout = normalize(result/wgt_sum)\n\t# normalize the result and return\n\tplt.imsave(\"outputImages/JointBilateral_\"+filename+\"_\"+str(sigma_spatial)+\"_\"+ str(sigma_intensity) + \".png\" ,out,dpi=600)\n\treturn out", "def fwhmmon(fwhm):\n seeing = fwhm.read(binary=True)*0.109\n if APF.seeinglist == []:\n APF.seeinglist = [seeing]*15\n else:\n APF.seeinglist.append(seeing)\n APF.seeinglist = APF.seeinglist[-15:]\n APF.seeing = np.median(np.array(APF.seeinglist,dtype=float))", "def addseeingImgFFTmoffat(img = None,fwhm=None):\n beta = 3.5\n alpha = fwhm/scale/(2.*np.sqrt(2**(1/beta)-1))\n kern = moffat_seeing(npix,alpha=alpha,beta=beta)\n img = img.astype('f') # required for the fftconvolve\n covimg = convolveH(img,kern)\n covimg = covimg/covimg.sum()\n return covimg", "def addStar(image, center, flux, fwhm):\n sigma = fwhm/FwhmPerSigma\n func = afwMath.GaussianFunction2D(sigma, sigma, 0)\n starImage = afwImage.ImageF(image.getBBox(afwImage.PARENT))\n # The flux in the region of the image will not be exactly the desired flux because the Gaussian\n # does not extend to infinity, so keep track of the actual flux and correct for it\n actFlux = 0\n # No function exists that has a fractional x and y offset, so set the image the slow way\n for i in range(image.getWidth()):\n x = center[0] - i\n for j in range(image.getHeight()):\n y = center[1] - j\n pixVal = flux * func(x, y)\n actFlux += pixVal\n starImage[i, j] += pixVal\n starImage *= flux / actFlux\n \n image += starImage", "def shear_est(self, gal_image, psf_image, noise=None, F=False):\n # gal_ps = self.pow_spec(gal_image)\n gal_ps = gal_image\n # gal_ps = hk_tool_box.smooth(gal_ps,self.size)\n if noise is not None:\n nbg = self.pow_spec(noise)\n self.flux2 = numpy.sqrt(gal_ps[int(self.size/2), int(self.size/2)]/numpy.sum(self.rim*gal_ps)*numpy.sum(self.rim))\n # nbg = hk_tool_box.smooth(nbg,self.size)\n # rim = self.border(2, size)\n # n = numpy.sum(rim)\n # gal_pn = numpy.sum(gal_ps*rim)/n # the Possion noise of galaxy image\n # nbg_pn = numpy.sum(nbg*rim)/n # the Possion noise of background noise image\n gal_ps = gal_ps - nbg# + nbg_pn - gal_pn\n\n if F:\n psf_ps = psf_image\n else:\n psf_ps = self.pow_spec(psf_image)\n # self.get_radius_new(psf_ps, 2)\n wb, beta = self.wbeta(self.hlr)\n maxi = numpy.max(psf_ps)\n idx = psf_ps < maxi / 
100000.\n wb[idx] = 0\n psf_ps[idx] = 1.\n tk = wb/psf_ps * gal_ps\n\n # ky, kx = self.ky, self.kx\n # #\n # kx2 = kx*kx\n # ky2 = ky*ky\n # kxy = kx*ky\n # k2 = kx2 + ky2\n # k4 = k2*k2\n # mn1 = (-0.5)*(kx2 - ky2) # (-0.5)*(kx**2 - ky**2)\n # mn2 = -kxy # -kx*ky\n # mn3 = k2 - 0.5*beta**2*k4 # kx**2 + ky**2 - 0.5*beta**2*(kx**2 + ky**2)**2\n # mn4 = k4 - 8*kx2*ky2 # kx**4 - 6*kx**2*ky**2 + ky**4\n # mn5 = kxy*(kx2 - ky2) # kx**3*ky - kx*ky**3\n\n # mn1 = self.mn1\n # mn2 = self.mn2\n mn3 = self.k2 - 0.5*beta**2*self.k4\n # mn4 = self.mn4\n # mn5 = self.mn5\n\n mg1 = numpy.sum(self.mn1 * tk)*self.alpha\n mg2 = numpy.sum(self.mn2 * tk)*self.alpha\n mn = numpy.sum(mn3 * tk)*self.alpha\n mu = numpy.sum(self.mn4 * tk)*(-0.5*beta**2)*self.alpha\n mv = numpy.sum(self.mn5 * tk)*(-2.*beta**2)*self.alpha\n\n return mg1, mg2, mn, mu, mv", "def _get_photobleach(imgflt_stack,flatfield,darkfield=None):\n # Initialize matrices\n imgflt_stack = np.reshape(imgflt_stack,(OPTIONS['size']*OPTIONS['size'],-1)).astype(np.float64)\n if darkfield is None:\n darkfield = np.zeros(flatfield.shape,dtype=np.float64)\n\n # Initialize weights and tolerances\n weights = np.ones(imgflt_stack.shape,dtype=np.float64)\n epsilon = np.float64(0.1)\n tol = np.float64(10**-6)\n\n # Run optimization exactly 5 times\n for r in range(5):\n # Calculate weights, offsets and coefficients\n W_idct_hat = np.reshape(flatfield,(-1,1))\n A_offset = np.reshape(darkfield,(-1,1))\n A_coeff = np.reshape(np.mean(imgflt_stack,0),(1,-1))\n\n # Initialization values and learning rates\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n ent1 = 1\n\n # Normalization factors\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Initialize augmented representation and error\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n Y1 = np.float64(0)\n\n # Run optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate augmented representation\n A = np.matmul(W_idct_hat,A_coeff) + A_offset\n\n # Calculate errors\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - weights/(ent1*mu),(imgflt_stack.shape[0],imgflt_stack.shape[1],1)),-1,initial=0) + np.min(np.reshape(E1 + weights/(ent1*mu),(imgflt_stack.shape[0],imgflt_stack.shape[1],1)),-1,initial=0)\n\n # Calculate coefficients\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0),(1, -1)) - np.mean(A_offset)\n A_coeff[A_coeff<0] = 0 # pixel values are never negative\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Error updates\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if below threshold\n stopCriterion = np.linalg.norm(Z1,'fro')/d_norm\n if stopCriterion < tol:\n converged = True\n\n # Update weights\n XE_norm = np.reshape(np.mean(A,0),(1,-1)) / E1\n weights = 1/np.abs(XE_norm + epsilon)\n weights = weights * weights.size/np.sum(weights)\n\n return A_coeff", "def simbad_brightstars(image_file=\"../nro_maps/12CO_20161002_FOREST-BEARS_spheroidal_xyb_grid7.5_0.099kms.fits\",\n brighter_than='G0', extra_criteria=\"(ra < 84.4 | dec < -6.66)\", otypes=\"Star\",\n replace_ra='hourangle', replace_dec='deg', add_sptype_letter_column=True,\n output=None, output_format='fits'):\n try:\n wcs = WCS(image_file).celestial #Drop non-celestial axes (like velocity and stokes). 
\n except:\n raise(\"image_file must be a fits image or cube with wcs in header.\")\n\n footprint = wcs.calc_footprint()\n\n \n ### ra_min/max, dec_min/max need to be in degrees.\n ### In the fits headers I have they are, but this may not always be true.\n ###\n ra_min, ra_max = footprint[:,0].min(), footprint[:,0].max()\n dec_min, dec_max = footprint[:,1].min(), footprint[:,1].max()\n\n s = Simbad()\n s.add_votable_fields('sptype')\n\n if extra_criteria:\n stars = s.query_criteria(\"ra > {} & ra < {} & dec > {} & dec < {} & sptypes < {} & {}\".format(\n ra_min, ra_max, dec_min, dec_max, brighter_than, extra_criteria), otypes=\"Star\")\n else:\n stars = s.query_criteria(\"ra > {} & ra < {} & dec > {} & dec < {} & sptypes < {}\".format(\n ra_min, ra_max, dec_min, dec_max, brighter_than), otypes=\"Star\")\n\n stars_coord = coord.SkyCoord(stars['RA'], stars['DEC'], unit=(u.hourangle, u.deg))\n\n if replace_ra:\n stars.replace_column('RA', Column(stars_coord.ra, name='RA', unit=replace_ra))\n if replace_dec:\n stars.replace_column('DEC', Column(stars_coord.dec, name='DEC', unit=replace_dec))\n\n if add_sptype_letter_column:\n stars.add_column(Column([sptype[0] for sptype in stars['SP_TYPE'].astype('str')], name='SP_LETTER', unit='str'))\n\n if output:\n stars.write(output, format=output_format)##\n else:\n return stars", "def struct_from_moffat_fwhm(wcs, fwhm, psf_threshold=0.5, beta=2.5):\n # image size will be twice the full-width, to account for\n # psf_threshold < 0.5\n size = int(round(fwhm / wcs.get_step(u.arcsec)[0])) * 2 + 1\n\n psf = moffat_image(fwhm=(fwhm, fwhm), n=beta, peak=True,\n wcs=wcs[:size, :size])\n\n # remove useless zeros on the edges.\n psf.mask_selection(psf._data < psf_threshold)\n psf.crop()\n assert tuple(np.array(psf.shape) % 2) == (1, 1)\n return ~psf.mask", "def FWHM(self):\n # The width of the Lorentz profile\n fl = 2.0 * self[\"al\"]\n # Width of the Gaussian [2.35 = 2*sigma*sqrt(2*ln(2))]\n fd = 2.35482 * self['ad']\n return 0.5346 * fl + numpy.sqrt(0.2166 * (fl**2.) + fd**2.)", "def test_fwhm(self):\n for i, func in enumerate(self.fwhm_funcs):\n for j, arr1d in enumerate(self.input_arrays):\n res = func(arr1d)\n assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)" ]
[ "0.6711939", "0.65907663", "0.6529644", "0.6306172", "0.6146442", "0.6140123", "0.609935", "0.5993391", "0.5977707", "0.5863462", "0.58407354", "0.5749594", "0.572178", "0.56493753", "0.563274", "0.5542923", "0.5449296", "0.5418304", "0.5362673", "0.53052205", "0.5297855", "0.52683985", "0.5266806", "0.5235398", "0.517932", "0.51215136", "0.51116323", "0.5101479", "0.5053768", "0.5053358" ]
0.7942247
0
Computes box scale as a function of k
def box_scale(k, m, s_min=0.1, s_max=0.9): # equation 4 from paper return s_min + (s_max - s_min) * (k - 1) / (m - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scaling_factor_box( h ):\n h = np.array( h, copy=False).ravel()\n n = len(h)\n s = ( np.pi**(n/2.0) ) / sp.special.gamma( n/2.0 + 1 )\n s = 1.0/s\n s /= np.product(h)\n return s", "def scale(self, k_x, k_y = None):\r\n if (k_y is None):\r\n return vec2(k_x*self.x, k_x*self.y)\r\n else:\r\n return vec2(k_x*self.x, k_y*self.y)", "def compute_scale(self, box, plane):\n center, normal = plane\n vertex_dots = [np.dot(vertex, normal) for vertex in box[1:]]\n vertex_dots = np.sort(vertex_dots)\n center_dot = np.dot(center, normal)\n scales = center_dot / vertex_dots[:4]\n return np.mean(scales)", "def scale(self, k_x, k_y = None, k_z = None):\r\n if (k_y is None):\r\n return vec3(k_x*self.x, k_x*self.y, k_x*self.z)\r\n else:\r\n return vec3(k_x*self.x, k_y*self.y, k_z*self.z)", "def scale_matrix(matrix, k):\n new_matrix = []\n for v in range(NUMBER_VECTORS):\n new_matrix.append(matrix[v].scale(k))\n return new_matrix", "def __scale_bboxes(self, bboxes, scale_x, scale_y):\n with tf.variable_scope('scale_bboxes'):\n return tf.multiply(bboxes, tf.tile([[scale_y, scale_x, scale_y,\n scale_x]],\n [tf.shape(bboxes)[0], 1]))", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(uv_coord, K, bbox, new_size):\r\n\txmin, xmax, ymin, ymax = bbox\r\n\r\n\tuv_coord[:, 0] = (uv_coord[:, 0] - xmin) / (xmax - xmin + 1.) * new_size[1]\r\n\tuv_coord[:, 1] = (uv_coord[:, 1] - ymin) / (ymax - ymin + 1.) * new_size[0]\r\n\r\n\txscale = new_size[1] / (xmax - xmin + 1.)\r\n\tyscale = new_size[0] / (ymax - ymin + 1.)\r\n\r\n\tshift = [[1, 0, -xmin],\r\n\t\t\t [0, 1, -ymin],\r\n\t\t\t [0, 0, 1]]\r\n\r\n\tscale = [[xscale, 0, 0],\r\n\t\t\t [0, yscale, 0],\r\n\t\t\t [0, 0, 1]]\r\n\r\n\tshift = np.array(shift)\r\n\tscale = np.array(scale)\r\n\r\n\tK = np.matmul(scale, np.matmul(shift, K))\r\n\r\n\treturn uv_coord, K", "def scale_root(self) -> int:\r\n ...", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def scale_box(box, img_size):\n xscale = img_size[0] / FLAGS.size\n yscale = img_size[1] / FLAGS.size\n x0, y0, x1, y1 = box\n return [\n float(x0) * xscale,\n float(y0) * yscale,\n float(x1) * xscale,\n float(y1) * yscale,\n ]", "def scale_boxes(boxes, image_shape):\n height = image_shape[0]\n width = image_shape[1]\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes", "def scale(self):\n return self.distribution.scale", "def scale(self):", "def scale(self):\n return self._gev_bijector.scale", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)", "def _ig_ksz(self, x, b):\n return self.P(x*self.r500) * (x / np.sqrt(x**2. 
- b**2.)) / self.Tloken(x)", "def zoom_k(img,k=1):\n\n try:\n\n m,n = img.shape\n\n tmp1 = np.zeros((m,n*k-k+1),dtype=float)\n\n for i in range(n-1):\n tmp1[:,k*i:k*i+k+1] = np.linspace(img[:,i],img[:,i+1],k+1,axis=1)\n\n tmp2 = np.zeros((m*k-k+1,n*k-k+1),dtype=float)\n\n for j in range(m-1):\n tmp2[k*j:k*j+k+1,:] = np.linspace(tmp1[j,:],tmp1[j+1,:],k+1,axis=0)\n\n except:\n\n m,n,_ = img.shape\n\n tmp1 = np.zeros((m,n*k-k+1,3),dtype=float)\n\n for i in range(n-1):\n tmp1[:,k*i:k*i+k+1,:] = np.linspace(img[:,i,:],img[:,i+1,:],k+1,axis=1)\n\n tmp2 = np.zeros((m*k-k+1,n*k-k+1,3),dtype=float)\n\n for j in range(m-1):\n tmp2[k*j:k*j+k+1,:,:] = np.linspace(tmp1[j,:,:],tmp1[j+1,:,:],k+1,axis=0)\n\n\n return tmp2", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]", "def _scale(x):\n scaleFactor = 1\n _ret = int(x/scaleFactor)\n return _ret", "def scale(self, factor):\n return BSplineFunc(self.kvs, self.coeffs * factor)", "def calculate_scaling_factors(blk):\n\n def cs(blk2):\n \"\"\"Recursive function for to do subblocks first\"\"\"\n for b in blk2.component_data_objects(pyo.Block, descend_into=False):\n cs(b)\n if hasattr(blk2, \"calculate_scaling_factors\"):\n blk2.calculate_scaling_factors()\n\n # Call recursive function to run calculate_scaling_factors on blocks from\n # the bottom up.\n cs(blk)\n # If a scale factor is set for an indexed component, propagate it to the\n # component data if a scale factor hasn't already been explicitly set\n propagate_indexed_component_scaling_factors(blk)\n # Use the variable scaling factors to scale the arc constraints.\n scale_arc_constraints(blk)", "def scaling(self):\n return self.stacked._box_scaling[1]", "def scale(curve):\n return curve/rmsd(curve)", "def convert_scaling_to_form_factors(qz, scale):\n apply_absorption_correction(qz, scale)\n apply_Lorentz_correction(qz, scale)\n for i in xrange(len(scale)):\n scale[i] = np.sign(scale[i]) * math.sqrt(abs(scale[i]))", "def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]\n # pass", "def l_to_k(self, l, z, massive_nu_approx = True):\n inv_dist = 1./self.f_K(z, massive_nu_approx = True)\n scale = np.outer(l,inv_dist)\n return scale", "def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)" ]
[ "0.6722605", "0.6683108", "0.66285825", "0.65496975", "0.63913274", "0.623833", "0.60930306", "0.6081411", "0.60072404", "0.5964558", "0.59401745", "0.59369767", "0.5929781", "0.5921312", "0.59208435", "0.5897965", "0.58625317", "0.58455175", "0.57387984", "0.57370836", "0.5723434", "0.5709558", "0.5691895", "0.5684572", "0.56740683", "0.5672975", "0.5669111", "0.5644942", "0.56159097", "0.5610946" ]
0.8007818
0
Create default centerbox for given position (i, j)
def get_default_box(i, j, scale, box_ratio, width, height): default_w = scale * sqrt(box_ratio) default_h = scale / sqrt(box_ratio) center_x = (j + 0.5) / width center_y = (i + 0.5) / height return (center_x, center_y, default_w, default_h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def center(box):\n x_center = box[:, 0] + (box[:, 2] - box[:, 0]) // 2\n y_center = box[:, 1] + (box[:, 3] - box[:, 1]) // 2\n return torch.stack((x_center, y_center)).t().to(box.device)", "def center_size(boxes):\n concat = P.Concat(1)\n return concat(((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2])) # w, h", "def to_center_form(boxes):\n x_min, y_min = boxes[:, 0], boxes[:, 1]\n x_max, y_max = boxes[:, 2], boxes[:, 3]\n center_x = (x_max + x_min) / 2.\n center_y = (y_max + y_min) / 2.\n width = x_max - x_min\n height = y_max - y_min\n return np.concatenate([center_x[:, None], center_y[:, None],\n width[:, None], height[:, None]], axis=1)", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def position_center(self, x, y):\n self.x = x\n self.y = y\n self.pos[0] = x - self.pos[2]/2\n self.pos[1] = y - self.pos[3]/2", "def bbox_center(bbox):\n y, x, h, w = bbox\n return int(y + h/2), int(x + w/2)", "def find_center(self):\n return(Point(self.corner.x + self.width/2.0, self.corner.y + self.height/2.0))", "def center(self):\n # minz to offset the heights to 0\n mz = (self.maxz-self.minz)/2\n #mz = self.minz\n return (self.minx + self.width / 2, self.miny + self.height / 2, mz)", "def getBoundingBoxCenter(self, shell=False, *args, **kwargs):\n if shell:\n self.grabShell()\n uvBB = pm.polyEvaluate(boundingBoxComponent2d=True)\n uvCenter = [((uvBB[0][1] + uvBB[0][0]) / 2), ((uvBB[1][1] + uvBB[1][0]) / 2)]\n return uvCenter", "def box(self, x, y, w, h):\n\t\tpass", "def show_centre_of_bbox(self, image, objects):\n for obj in objects:\n image = cv2.circle(image, \n (int(obj.centre_cords[0] * self.x), int(obj.centre_cords[1] * self.y)), \n radius=5, \n color=AXE_COLOR, \n thickness=-1)\n \n return image", "def center_size(boxes):\n return torch.cat([(boxes[:, :2] + boxes[:, 2:])/2, # cx, cy\n boxes[:, :2] - boxes[:, 2:]], 1) # w, h", "def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]", "def center(self):\n\n return (\n self.x() + (self.width() / 2),\n self.y() + (self.height() / 2)\n )", "def box(original, radius):\n batches = original.size()[0]\n num_elem = h.product(original.size()[1:])\n ei = h.getEi(batches,num_elem)\n \n if len(original.size()) > 2:\n ei = ei.contiguous().view(num_elem, *original.size())\n\n return HBox(original, None, ei * radius).checkSizes()", "def coord_corner2center(bbox):\n x1, y1 = bbox.new([bbox[0]]), bbox.new([bbox[1]])\n x2, y2 = bbox.new([bbox[2]]), bbox.new([bbox[3]])\n x = torch.floor((x2 - x1 + 1) / 2) + x1\n y = torch.floor((y2 - y1 + 1) / 2) + y1\n w = x2 - x1 + 1\n h = y2 - y1 + 1\n return x, y, w, h", "def center(self):\r\n self.centerx = self.screen_rect.centerx \r\n self.centery = self.screen_rect.centery", "def center(width, height):\n return width/2, height/2", "def center(self):\n return [self.position[i]+self.radius for i in range(2)]", "def center(self):\n return (self.centerx, self.centery)", "def create_human_box(self, i):\n self.box = self.detections[0, 0, i, 3:7] * np.array([self.w, self.h, self.w, self.h])\n (self.startX, self.startY, self.endX, self.endY) = self.box.astype(\"int\")", "def get_center(self, *indexes):\n if self.layout == \"horizontal\":\n return self._center_horizontal(*indexes)\n elif self.layout == \"verticle\":\n return self._center_vertical(*indexes)\n elif self.layout 
== \"grid\":\n return self._compute_grid(*indexes)\n else:\n \"\"\"Another advantage is that if you pass a wrong layout value,\nwe will know right after the instance's built but not when invoking the function.\"\"\"\n raise KeyError, \"invalid layout: %r\" % self.layout", "def center(self, x):\n\n shape = x.shape\n nx = shape[1]\n ny = shape[0]\n hnx = nx // 2\n hny = ny // 2\n\n temp = x[0:hny, 0:hnx].copy()\n x[0:hny, 0:hnx] = x[hny:ny, hnx:nx].copy()\n x[hny:ny, hnx:nx] = temp\n\n temp = x[0:hny, hnx:nx].copy()\n x[0:hny, hnx:nx] = x[hny:ny, 0:hnx].copy()\n x[hny:ny, 0:hnx] = temp", "def cells_center(self,refresh=False,mode='first3'):\n if refresh is True:\n to_update=slice(None)\n elif refresh is not False:\n to_update=refresh\n else:\n to_update = np.isnan(self.cells['_center'][:,0])\n\n if np.sum(to_update) > 0:\n if mode=='first3':\n p1,p2,p3 = [self.nodes['x'][self.cells['nodes'][to_update,i]] for i in [0,1,2]]\n self.cells['_center'][to_update] = circumcenter(p1,p2,p3)\n elif mode=='sequential':\n for c in np.arange(self.Ncells())[to_update]:\n points=self.nodes['x'][self.cell_to_nodes(c)]\n self.cells['_center'][c] = poly_circumcenter(points)\n \n return self.cells['_center']", "def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n self.button_redraw_box.setEnabled(True)\n self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)", "def centerbox(pop,leftpoint,rightpoint,poptree,popxvals):\r\n\r\n if poptree[pop][2] == -1: ## pop is a terminal population\r\n ## at this point popxvals[pop] holds just the width of the box (i.e. 
popxvals[pop][0] is 0)\r\n popxvals[pop][1] = popxvals[pop][1] - popxvals[pop][0] + leftpoint\r\n popxvals[pop][0] = leftpoint\r\n return popxvals[pop][1]-popxvals[pop][0],leftpoint + (popxvals[pop][1]-popxvals[pop][0])/ 2.0,popxvals, leftpoint, popxvals[pop][1]\r\n else:\r\n popspacer = gv[\"popboxspaceadj\"] * popboxspacedefault\r\n (lw,lc, popxvals, leftpoint,rightpoint) = centerbox(poptree[pop][2],leftpoint,rightpoint, poptree,popxvals)\r\n rleftpoint = rightpoint + popspacer\r\n (rwidth,rcenter, popxvals, rleftpoint,rightpoint) = centerbox(poptree[pop][3],rleftpoint,rightpoint, poptree,popxvals)\r\n newwidth = lw + popspacer + rwidth\r\n\r\n newwidth = popxvals[pop][1] - popxvals[pop][0]\r\n newcenter = lc + (rcenter - lc)/2.0\r\n if newcenter - (newwidth/2.0) < leftpoint :\r\n newcenter += leftpoint - (newcenter - (newwidth/2.0))\r\n templeft = newcenter - newwidth/2.0\r\n popxvals[pop][0] = templeft\r\n popxvals[pop][1] = templeft + newwidth\r\n return newwidth, newcenter,popxvals,leftpoint,rightpoint", "def center(self):\n xc = (self.x.max() + self.x.min())/2.\n yc = (self.y.max() + self.y.min())/2.\n return (xc, yc)", "def define_box_location(self):\n self.contents['Box_ID'] = np.ones(self.numatom) * self.num_box", "def set_center(self,structure):\n for i,b in enumerate(self.bfs):\n b.set_center( structure[ self.LIST1[i] ] ) \n return" ]
[ "0.6984537", "0.6536565", "0.6419982", "0.6356094", "0.61390704", "0.6124859", "0.60519403", "0.6050198", "0.6037", "0.60185087", "0.60182935", "0.60087377", "0.5990671", "0.59769964", "0.5975228", "0.5962497", "0.59607095", "0.59463704", "0.59422565", "0.59399325", "0.59297025", "0.5907633", "0.59015435", "0.5898995", "0.58853537", "0.58782476", "0.587618", "0.5872365", "0.5853361", "0.58479965" ]
0.7353168
0
Returns BoundBoxArray of default boxes
def get_default_boxes(out_shapes, box_ratios): default_boxes = [] n_outs = len(out_shapes) scales = (box_scale(n_out + 1, n_outs) for n_out in range(n_outs)) layer_params = zip(out_shapes, scales, box_ratios) for out_shape, scale, layer_box_ratios in layer_params: height, width = height_and_width(out_shape) layer_boxes = [[[get_default_box(i, j, scale, box_ratio, width, height) for box_ratio in layer_box_ratios] for i in range(height)] for j in range(width)] default_boxes.append(layer_boxes) return BoundBoxArray.from_centerboxes(flatten_list(default_boxes)).clip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_boxes(self):\r\n\r\n boxes = [(\" \", self.worldbox.tl, self.worldbox.br)]\r\n# boxes = []\r\n boxes += [(\".\", b.tl, b.br) for b in self.wallboxes]\r\n boxes += [(\"x\", b.tl, b.br) for b in self.targetboxes]\r\n agentscale = 100\r\n boxes += [(\"a\", (self.i_state[0] - self.dx * agentscale, self.i_state[1] - self.dx * agentscale),\r\n (self.i_state[0] + self.dx * agentscale, self.i_state[1] + self.dx * agentscale))]\r\n return boxes", "def base_boxes():\n return sorted(list(set([name for name, provider in _box_list()])))", "def _get_boxes(self):\n return self._boxes", "def get_boxes(self) -> List[Box]:\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]", "def _initial_normal_bbox(self):\n cx = cy = .5\n width = FLAGS.bbox_grid / float(FLAGS.cropbox_grid)\n x1 = cx - width / 2\n x2 = cx + width / 2\n y1 = cy - width / 2\n y2 = cy + width / 2\n return [y1,x1,y2,x2]", "def initDefaults(self):\n return _libsbml.BoundingBox_initDefaults(self)", "def create_prior_boxes(self):\n # value of k for each feature map to create k^2 boxes for each feature map\n feature_map_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5}\n\n # scale for boxes across different feature maps. boxes for inner feature maps\n # are scaled much lower to detect small objects\n obj_scales = {'conv4_3': 0.1, 'conv7': 0.21, 'conv8_2': 0.255, 'conv9_2': 0.30}\n\n # Defined aspect ratio calculated from mean of (w/h) across all bounding boxes\n # from the dataset. The mean is 0.66 with deviation of 0.07. So aspect ratio is kept\n # at 0.66 for all feature maps\n aspect_ratios = {'conv4_3': [0.5], 'conv7': [0.55], 'conv8_2': [0.6], 'conv9_2': [.66]}\n\n fmaps = list(feature_map_dims.keys())\n prior_boxes = []\n for k, fmap in enumerate(fmaps):\n # for each feature map, create k*k boxes\n for i in range(feature_map_dims[fmap]):\n for j in range(feature_map_dims[fmap]):\n # calculate center coordinates of boxes\n cx = (j + 0.5) / feature_map_dims[fmap]\n cy = (i + 0.5) / feature_map_dims[fmap]\n\n # For each\n for ratio in aspect_ratios[fmap]:\n prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])\n\n prior_boxes = torch.FloatTensor(prior_boxes).to(device) # (1930, 4)\n prior_boxes.clamp_(0, 1) # (1930, 4)\n\n return prior_boxes", "def extract_bboxes(mask):\r\n boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)\r\n for i in range(mask.shape[-1]):\r\n m = mask[:, :, i]\r\n # Bounding box.\r\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\r\n vertical_indicies = np.where(np.any(m, axis=1))[0]\r\n if horizontal_indicies.shape[0]:\r\n x1, x2 = horizontal_indicies[[0, -1]]\r\n y1, y2 = vertical_indicies[[0, -1]]\r\n # x2 and y2 should not be part of the box. Increment by 1.\r\n x2 += 1\r\n y2 += 1\r\n else:\r\n # No mask for this instance. Might happen due to\r\n # resizing or cropping. Set bbox to zeros\r\n x1, x2, y1, y2 = 0, 0, 0, 0\r\n boxes[i] = np.array([y1, x1, y2, x2])\r\n return boxes.astype(np.int32)", "def boxes(self) -> dict:\n return self.data[\"boxes\"]", "def extract_bboxes(mask):\n boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)\n for i in range(mask.shape[-1]):\n m = mask[:, :, i]\n # Bounding box.\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\n vertical_indicies = np.where(np.any(m, axis=1))[0]\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n y1, y2 = vertical_indicies[[0, -1]]\n # x2 and y2 should not be part of the box. 
Increment by 1.\n x2 += 1\n y2 += 1\n else:\n # No mask for this instance. Might happen due to\n # resizing or cropping. Set bbox to zeros\n x1, x2, y1, y2 = 0, 0, 0, 0\n boxes[i] = np.array([y1, x1, y2, x2])\n return boxes.astype(np.int32)", "def extract_bboxes(mask):\n boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)\n for i in range(mask.shape[-1]):\n m = mask[:, :, i]\n # Bounding box.\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\n vertical_indicies = np.where(np.any(m, axis=1))[0]\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n y1, y2 = vertical_indicies[[0, -1]]\n # x2 and y2 should not be part of the box. Increment by 1.\n x2 += 1\n y2 += 1\n else:\n # No mask for this instance. Might happen due to\n # resizing or cropping. Set bbox to zeros\n x1, x2, y1, y2 = 0, 0, 0, 0\n boxes[i] = np.array([x1, y1, x2, y2])\n return boxes.astype(np.int32)", "def extract_bboxes(mask):\n boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)\n for i in range(mask.shape[-1]):\n # Bounding box.\n\n for j in range(mask.shape[0]):\n m = mask[j, :, i]\n horizontal_indicies = np.where(m)[0]\n\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n\n # x2 should not be part of the box. Increment by 1.\n x2 += 1\n else:\n # No mask for this instance. Might happen due to\n # resizing or cropping. Set bbox to zeros\n x1, x2 = 0, 0\n boxes[i,j] = np.array([x1, x2])\n\n return boxes.astype(np.int32)", "def store_empty_graphic_box(self):\n for box in self.laby.empty_box():\n x = box[0] * 40\n y = box[1] * 40\n self.store_emptyBox.append((y, x))\n return self.store_emptyBox", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))", "def get_bboxes(self, **kwargs):\n pass", "def create_model_input_default(self, rgb_images, all_boxes, all_masks):\n box_regions = []\n for i in range(all_boxes.size(0)):\n for j in range(all_boxes.size(1)):\n box = all_boxes[i][j]\n if is_dummy_box(box):\n continue\n image = rgb_images[i].cpu()\n\n # Apply object mask to the image\n if self.use_masks:\n image = image.clone()\n mask = all_masks[i][j].cpu()\n image = image * mask\n\n box_region = get_patch_from_image(box, image)\n box_regions.append(box_region)\n\n t = T.Compose([T.ToPILImage(), T.Resize((self.reduced_size, self.reduced_size)), T.ToTensor()])\n box_regions = [t(box.cpu()) for box in box_regions]\n return torch.stack(box_regions)", "def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox", "def get_raw_bounds(self) -> [Vector, Vector]:\n\t\tverts = np.array([v.co for mesh in self._meshes for v in mesh.data.vertices])\n\t\tbbox_min = Vector([*np.min(verts, axis=0)])\n\t\tbbox_max = Vector([*np.max(verts, axis=0)])\n\t\treturn bbox_min, bbox_max", "def create_grid(size_x, size_y, default=None):\n return [[default for _x in range(size_y)] for _y in range(size_x)]", "def get_radec_boxes(self):\n\n return self.radec_boxes[:]", "def _polygons_to_bboxes(polygons):\n# Build bounding boxes\n bboxes = np.empty([len(polygons), 4])\n for n, p in enumerate(polygons):\n try:\n left, bottom = np.min(p, axis = 0)\n except:\n import pdb\n pdb.set_trace()\n 
right, top = np.max(p, axis = 0)\n bboxes[n] = [left, bottom, right, top]\n return bboxes", "def bbox(self):\n if len(self.elements) == 0:\n raise ValueError(\"Group is empty, no bbox is available\")\n bboxes = np.empty([len(self.elements), 4])\n for n, e in enumerate(self.elements):\n bboxes[n] = e.bbox.flatten()\n\n bbox = (\n (bboxes[:, 0].min(), bboxes[:, 1].min()),\n (bboxes[:, 2].max(), bboxes[:, 3].max()),\n )\n return np.array(bbox)", "def generate_default_spaces(self):\n\n defaults =[['cog', 'C_cog_space_GRP'],\n ['world', 'parts_GRP'],\n ['trueWorld', 'noXform_GRP']]\n\n return defaults", "def init_grid(self):\n grid = []\n for i in range(self.settings['grid_size']):\n grid.append([])\n for j in range(self.settings['grid_size']):\n if [j, i] in self.settings['walls']:\n grid[i].append(g.WALL)\n else:\n grid[i].append(g.EMPTY)\n return grid", "def _default() -> list:\n if metadata is None or metadata.default is None:\n return []\n\n return self._always_array(metadata.default)", "def _available_boxes(self, graph):\n return sorted([node.name for node in graph.available_nodes()\n if not isinstance(node.meta, Ibox)])", "def generate_boxes(self, img):\r\n return [Box(left, top, img) for (left, top) in self.coords]", "def bbox(self):\n return [self._x0, self._y0, self._x1, self._y1]", "def bounding_box(self):\n box_min = []\n box_max = []\n if self.is_empty():\n raise ValueError('empty polytope is not allowed')\n for i in range(0, self.space_dimension()):\n x = Variable(i)\n coords = [ v.coefficient(x) for v in self.generators() ]\n max_coord = max(coords)\n min_coord = min(coords)\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))", "def bbox(self):\n bbox = self.get_bounding_box()\n if bbox is None:\n bbox = ((0, 0), (0, 0))\n return np.array(bbox)" ]
[ "0.6688832", "0.65859437", "0.6556119", "0.6538074", "0.6434962", "0.6420856", "0.6287684", "0.62804985", "0.62618226", "0.6227584", "0.62249935", "0.61945534", "0.61637175", "0.6134957", "0.60598016", "0.60597944", "0.6055284", "0.5994084", "0.5981978", "0.59624654", "0.5955712", "0.5950483", "0.59494436", "0.5930191", "0.5922222", "0.59134775", "0.5897064", "0.5864801", "0.5842702", "0.5832441" ]
0.7455453
0
Ensure fetching waits at least FETCH_MINIMUM_WAIT_SECONDS seconds between requests.
def __ensure_fetching_rate_limit(self) -> None: current = datetime.now() difference = current - self.fetched_last time_to_wait = FETCH_MINIMUM_WAIT_SECONDS - difference.total_seconds() if time_to_wait > 0: time.sleep(time_to_wait) self.fetched_last = datetime.now()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fair_use_delay(self):\n if self.next_request_no_sooner_than is not None and type(self.next_request_no_sooner_than) == int:\n sleep_time = max(0, self.next_request_no_sooner_than)\n log.info('Sleeping for {} seconds until next request.'.format(sleep_time))\n time.sleep(sleep_time)", "def _sleepIfNecessary(self):\n t = time.time()\n if t - self._lastQueryTime < self._minDelayBetweenRequests:\n time.sleep(self._minDelayBetweenRequests - (t - self._lastQueryTime))\n self._lastQueryTime = t", "def _random_wait_start(self):\n\n # wait random amount of time to break synchron client behavior\n wait_until = time.time() + random.randint(0, self._config['random_wait_time'])\n while time.time() < wait_until and self._running:\n self._logger.debug(\"Waiting %.1fs to start first request\" % (wait_until - time.time()))\n time.sleep(1)", "def test_sleep_request(self):\n date = datetime.now() - timedelta(minutes=14)\n RequestAPI.objects.create(total_request=450, date=date)\n start = time.time()\n ManagerRequestApiTwitter().handle_rate_limit()\n stop = time.time()\n total_time = stop - start\n self.assertGreater(total_time, 60)", "def _wait_before_call(self):\n while (dt.datetime.now() - self._last_call_ts) <= dt.timedelta(\n seconds=self.api_timeout\n ):\n time.sleep(0.5)\n self._last_call_ts = dt.datetime.now()", "def test_fetch_delayed():\n ident = _id()\n proc = multiprocessing.Process(target=proj.fetch, args=('delayed', ident), daemon=True)\n proc.start()\n time.sleep(0.1)\n status = proj.status('delayed', ident)\n assert status == 'pending'\n time.sleep(1.25)\n res = proj.fetch('delayed', ident)\n assert res.status == 'complete'\n assert res.start_time < res.end_time\n assert res.result", "async def fetch(self, url: str, session: aiohttp.ClientSession) -> None:\n for tries in range(10):\n if self.requests_count == self.max_requests:\n time.sleep(60)\n self.requests_count = 0\n\n self.requests_count += 1\n\n try:\n async with session.get(url) as response:\n self.results[url] = await response.json()\n break\n except:\n await asyncio.sleep(1)", "def need_update(self):\n five_minutes_ago = datetime.now() - timedelta(minutes=5)\n if (\n self.fetch_status != self.FetchStatus.NONE\n and self.collected_at > five_minutes_ago\n ):\n return False\n return True", "def __enter__(self):\n self.orig_fetch_max_wait_time = self.consumer.fetch_max_wait_time\n self.orig_fetch_min_bytes = self.consumer.fetch_min_bytes\n if self.block:\n self.consumer.fetch_max_wait_time = self.timeout\n self.consumer.fetch_min_bytes = 1\n else:\n self.consumer.fetch_min_bytes = 0", "def _maybe_start_fetch_thread(self) -> None:\n with self._event_fetch_lock:\n if (\n self._event_fetch_list\n and self._event_fetch_ongoing < EVENT_QUEUE_THREADS\n ):\n self._event_fetch_ongoing += 1\n event_fetch_ongoing_gauge.set(self._event_fetch_ongoing)\n # `_event_fetch_ongoing` is decremented in `_fetch_thread`.\n should_start = True\n else:\n should_start = False\n\n if should_start:\n run_as_background_process(\"fetch_events\", self._fetch_thread)", "def wait(self) -> None:\n now = time.time()\n if now < self.lockTime:\n diff = self.lockTime - now\n logger.debug(\"Waiting %ss to avoid ratelimit\", diff)\n time.sleep(diff)", "async def sleep(self):\n if self.stream.closed: # pragma: no cover\n return\n self.next_wait = min(self.next_wait * 2, self.max_wait)\n try:\n await asyncio.sleep(self.next_wait)\n except Exception: # pragma: no cover\n pass", "def rate_limit(wait_time=60):\n \n if \"prev_time\" not in globals():\n global prev_time\n 
prev_time = time.time() - wait_time\n \n current_time = time.time()\n delta = current_time - prev_time\n \n if delta < wait_time:\n time.sleep(wait_time - delta)\n \n prev_time = current_time", "def wait(wait_time=WAIT_TIME):\n # time.sleep(wait_time)\n pass", "def check_for_lock_request(self):\n while True:\n sleep(0.1)\n if self.get_state():\n sleep(5)\n self.lock()\n break", "def throttle(self):\n\n # Check how long it has been since last request was sent\n time_since_req = time.time() - self.time_last_req\n\n # If last request was too recent, pause\n if time_since_req < self.wait_time:\n self.wait(self.wait_time - time_since_req)", "def test_polling_loop(self, cursor):\n cursor._poll_interval = 0\n yield cursor.execute('SELECT COUNT(*) FROM many_rows')\n self.assertEqual((yield cursor.fetchone()), [10000])", "def fetch(cls, url):\n delta = time.time() - cls._time_last_fetched\n wait_time = TIME_TIL_RETRY - delta\n if wait_time > 0:\n time.sleep(wait_time)\n resp = requests.get(url)\n cls._time_last_fetched = time.time()\n resp.raise_for_status()\n return resp", "def _wait_before_call(self) -> None:\n while (datetime.now() - self._last_call_ts) <= timedelta(\n seconds=self._config.get_yfinance_polling_period()\n ):\n time.sleep(0.1)\n self._last_call_ts = datetime.now()", "def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)", "def __wait(min_sec, max_sec):\n time.sleep(randint(min_sec, max_sec))", "def fair_use_delay(self):\n if foo._error_count > 1:\n delay = foo(120, 30 * (2 ** (foo._error_count - 2)))\n else:\n delay = foo._next_call_timestamp - foo(foo.time())\n if delay > 0 and foo.respect_fair_use_policy:\n foo.info('Sleeping for %s seconds' % delay)\n foo.sleep(delay)", "def lazy_assert_n_requests(self, expected_requests, msg=None):\n request_count_0 = driver.request_count\n try:\n yield None\n finally:\n request_count_1 = driver.request_count\n msg = (msg + '\\n') if msg else ''\n msg += ('expected requests != real requests; checked by:\\n'\n ' with self.lazy_assert_n_requests({}):'.format(expected_requests))\n self.lazyAssertEqual(expected_requests, request_count_1 - request_count_0, msg=msg)", "def precheck(self):\n # making sure it's a time for pull, otherwise just sleep\n if datetime.now() < self.startTime + timedelta(hours=int(self.newsFrequency)):\n logging.info(\"Didn't reach time to wakeup yet, going to sleep\")\n self.sleep()", "def wait_for_container():\n for i in xrange(30):\n print(\"Waiting for service to come up\")\n try:\n requests.get(URL).raise_for_status()\n return True\n except Exception as e:\n print e\n sleep(1)\n\n return False", "def test_wait_for_page_in_timeout(self):\n start_time = datetime.now()\n with self.assertRaises(SpdbError):\n csdb = CacheStateDB(self.config_data)\n ch = csdb.create_page_in_channel()\n\n csdb.wait_for_page_in([\"MY_TEST_KEY1\", \"MY_TEST_KEY2\"], ch, 1)\n\n assert (datetime.now() - start_time).seconds < 3", "def check_active_requests():\n\n active_requests = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='waiting'\")\n for request in active_requests:\n\n\t# Check requested status \n\tif DownloaderSPAN512.check_request_done(request):\n\t dlm_cout.outs(\"Restore (GUID: %s) has succeeded. 
Will create file entries.\\n\" % request['guid'])\n\t create_file_entries(request)\n\n\telse:\n#\t dlm_cout.outs(\"Request (GUID: %s) has failed.\\n\" \\\n#\t \"\\tDatabase failed to report the data as restored.\" % request['guid'])\n#\t jobtracker.query(\"UPDATE requests SET status='failed', \" \\\n# \"details='Request failed. Why ?', \" \\\n# \"updated_at='%s' \" \\\n# \"WHERE guid='%s'\" % (jobtracker.nowstr(), request['guid']))\n\n query = \"SELECT (TO_SECONDS('%s')-TO_SECONDS(created_at)) \" \\\n \"AS deltaT_seconds \" \\\n \"FROM requests \" \\\n \"WHERE guid='%s'\" % \\\n (jobtracker.nowstr(), request['guid'])\n row = jobtracker.query(query, fetchone=True)\n #if row['deltaT_seconds']/3600. > config.download.request_timeout:\n if row/3600. > config.download.request_timeout:\n dlm_cout.outs(\"Restore (GUID: %s) is over %d hr old \" \\\n \"and still not ready. Marking \" \\\n \"it as failed.\" % \\\n (request['guid'], config.download.request_timeout))\n jobtracker.query(\"UPDATE requests \" \\\n \"SET status='failed', \" \\\n \"details='Request took too long (> %d hr)', \" \\\n \"updated_at='%s' \" \\\n \"WHERE guid='%s'\" % \\\n (config.download.request_timeout, jobtracker.nowstr(), \\\n request['guid']))", "def implicitly_wait(self, secs):\n self.base_driver.implicitly_wait(secs)", "def polling_fetch(profile, name, max_attempts=10, wait_interval=1):\n data = None\n count = 0\n while count < max_attempts:\n data = fetch_by_name(profile, name)\n if data:\n break\n else:\n count += 1\n sleep(wait_interval)\n if not data:\n msg = \"Timed out waiting for instance profile to be created.\"\n raise WaitTimedOut(msg)\n return data", "def send_fetch_request(self, payloads=None, fail_on_error=True,\n callback=None,\n max_wait_time=DEFAULT_FETCH_SERVER_WAIT_MSECS,\n min_bytes=DEFAULT_FETCH_MIN_BYTES):\n if self.timeout is not None and (\n max_wait_time / 1000) > (self.timeout - 0.1):\n raise ValueError(\n \"%r: max_wait_time: %d must be less than client.timeout by \"\n \"at least 100 milliseconds.\", self, max_wait_time)\n\n encoder = partial(KafkaCodec.encode_fetch_request,\n max_wait_time=max_wait_time,\n min_bytes=min_bytes)\n\n # resps is a list of FetchResponse() objects, each of which can hold\n # 1-n messages.\n resps = yield self._send_broker_aware_request(\n payloads, encoder,\n KafkaCodec.decode_fetch_response)\n\n returnValue(self._handle_responses(resps, fail_on_error, callback))" ]
[ "0.6250035", "0.6202518", "0.5788163", "0.57635534", "0.575597", "0.5703448", "0.56474084", "0.5559574", "0.5528586", "0.5481914", "0.5442929", "0.54330456", "0.53454167", "0.5343117", "0.5315743", "0.5307995", "0.5278265", "0.52664554", "0.5192012", "0.5185223", "0.5163235", "0.51079124", "0.50870156", "0.5085102", "0.5068411", "0.5057179", "0.5043405", "0.50303704", "0.50262403", "0.5021898" ]
0.7916572
0
Produce web token value from agreement HTML.
def __parse_agreement(html: str) -> str: soup = BeautifulSoup(html, features='html.parser') web_token = soup.find('input', {'name': 'csrfmiddlewaretoken'}) if hasattr(web_token, 'value'): return web_token['value'] raise ValueError('Expected to find web token within HTML.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __fetch_web_token(self) -> str:\n self.__ensure_fetching_rate_limit()\n response = self.session.get(EFD_ENDPOINT_SEARCH)\n web_token = EFD.__parse_agreement(response.text)\n return web_token", "def _generate_token_value():\n return secrets.token_urlsafe()", "def get_token(self, res):\n token = res.xpath('//*[@name=\"_csrf-app\"]')[0].attrs['value']\n return token", "def parse_token(page_html):\n offset = 7\n token = page_html.find(\"token\")\n start_pos = (page_html[token:]).find('value=\"') + token\n end_pos = (page_html[start_pos + offset:]).find('\"') + start_pos + offset\n\n return page_html[start_pos + offset:end_pos]", "def find_auth_token(document_html):\n search_result = re.search(AUTH_TOKEN_REGEX, document_html)\n if search_result:\n return search_result.group('auth_token')", "def token(self) -> str:", "async def token(request) -> ResponseText:\n return ResponseText(\n \"\".join(random.choices(string.ascii_uppercase + string.digits, k=42)) # noqa: S311\n )", "def token(self):\n return self[\"token\"]", "def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']", "def getToken(self):\n \n raise NotImplementedError", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def extract_csrf_token(htmlData):\n parsedHTML = HTMLMetaTagCSRFTokenParser()\n parsedHTML.feed(htmlData)\n\n token = parsedHTML.CSRF_Token\n\n parsedHTML.clean()\n\n return token", "def BuildToken(request, execution_time):\n\n if request.method == \"GET\":\n reason = request.GET.get(\"reason\", \"\")\n elif request.method == \"POST\":\n # The header X-GRR-REASON is set in api-service.js, which django converts to\n # HTTP_X_GRR_REASON.\n reason = utils.SmartUnicode(urllib2.unquote(\n request.META.get(\"HTTP_X_GRR_REASON\", \"\")))\n\n token = access_control.ACLToken(\n username=request.user,\n reason=reason,\n process=\"GRRAdminUI\",\n expiry=rdfvalue.RDFDatetime().Now() + execution_time)\n\n for field in [\"REMOTE_ADDR\", \"HTTP_X_FORWARDED_FOR\"]:\n remote_addr = request.META.get(field, \"\")\n if remote_addr:\n token.source_ips.append(remote_addr)\n return token", "def get_token(request):\n # Create a TwilioCapability token with our Twilio API credentials\n capability = ClientCapabilityToken(\n settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN\n )\n\n capability.allow_client_outgoing(settings.TWILIO_ACCOUNT_SID)\n capability.allow_client_incoming('support_agent')\n token = capability.to_jwt()\n\n return JsonResponse({'token': token.decode('utf-8')})", "def __post_agreement(self, web_token: str) -> List[str]:\n payload = {\n 'prohibition_agreement': 1,\n 'csrfmiddlewaretoken': web_token\n }\n\n self.session.headers.update(HTTP_HEADERS)\n self.__ensure_fetching_rate_limit()\n response = self.session.post(EFD_ENDPOINT_ACCESS, data=payload)\n form_names = self.__parse_search_form(response.text)\n return form_names", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']", "def createAccessTokenReplacement(self):\r\n\r\n url = self._config['OAUTH2ENDPOINT']['huddleAuthServer'] + \"request?response_type=code\" + \\\r\n \"&client_id=\" + self._config['OAUTH2']['clientID'] 
+ \\\r\n \"&redirect_uri=\" + self._config['OAUTH2']['redirectUri']\r\n webbrowser.open_new(url)\r\n code = input('Please enter the code from your web browser:')\r\n\r\n response = self._oauth.obtainAccessTokenBy3LeggedOAuth(code)\r\n responseBody = json.loads(response['Body'])\r\n\r\n try:\r\n oauthToken = Token(responseBody)\r\n except TypeError as e:\r\n print (\"Bad response when requesting a token \" + str(response))\r\n sys.exit()\r\n\r\n return oauthToken", "def extract_token(coin_name):\n\n cname = coin_name.lower()\n url = f\"https://coinmarketcap.com/currencies/{cname}/\"\n r = requests.get(url)\n if r.status_code != 200:\n raise ValueError(\"Unknown coin name. Please check the url \"\n \"on coinmarketcap.com\")\n else:\n html_page = r.text\n n = html_page.find(\"https://etherscan.io/token\")\n\n if n == -1:\n raise ValueError(\"Error : Coin not on Ethereum platform\")\n else:\n token = html_page[n+27:n+69]\n return token", "def get_edit_token():\r\n\r\n print 'Getting edit token'\r\n url = 'http://shank.trikeapps.com/mediawiki/index.php?title=Special:Import'\r\n feed = urllib2.urlopen(url)\r\n buf = feed.read()\r\n tree = etree.fromstring(buf, parser)\r\n nodes = tree.xpath('//input[@name=\"editToken\"]')\r\n if not nodes or 'value' not in nodes[0].attrib:\r\n raise Exception('Failed to get edit token needed for importing')\r\n token = nodes[0].get('value')\r\n return token", "def make_token():\n return secrets.token_urlsafe(36)", "def token(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"token\")", "def get_token():\n url = settings.GENERATE_TOKEN_URL\n headers = {\"Authorization\": \"Basic {}\".format(settings.MPESA_APP_AUTHTOKEN)}\n response = get(url, headers)\n return response.json()", "def get_token(html, pattern):\n result = pattern.search(html)\n if result:\n return result.group(1)\n else:\n error('Failed to find token')\n return None", "def generate_csrf_token() -> int:\r\n ...", "def get_authenticity_token(session):\n debug('Getting the authenticity token')\n\n login_page = session.get('https://leanpub.com/login').content\n\n login_page_parsed = html.fromstring(login_page)\n\n authenticity_token = login_page_parsed.xpath('//input[@name=\"authenticity_token\"]/@value')\n\n if not authenticity_token or len(authenticity_token) != 1:\n debug('Unable to find the authenticity token', err=True, terminate=True)\n\n authenticity_token = authenticity_token[0]\n\n debug('Authenticity token: ' + authenticity_token)\n\n return authenticity_token", "def token_key(text):\n content2 = str(text.split())\n beginning = content2.find('access_token\":\"') + int(15)\n end = content2.find('token_type') - int(3)\n access_token = content2[beginning:end]\n return access_token", "def token_key(text):\n content2 = str(text.split())\n beginning = content2.find('access_token\":\"') + int(15)\n end = content2.find('token_type') - int(3)\n access_token = content2[beginning:end]\n return access_token" ]
[ "0.6441431", "0.5810527", "0.5624718", "0.55785173", "0.55474305", "0.55416983", "0.5341234", "0.52849877", "0.52341986", "0.51770765", "0.51665294", "0.51665294", "0.51665294", "0.51644754", "0.51423883", "0.5113351", "0.5105391", "0.5065668", "0.50644654", "0.50285536", "0.5023023", "0.5022631", "0.5019931", "0.501732", "0.5012763", "0.4992008", "0.49878114", "0.49812225", "0.4964808", "0.4964808" ]
0.767607
0
Return search form input names.
def __parse_search_form(html: str) -> List[str]: soup = BeautifulSoup(html, features='html.parser') form_names = [i['name'] for i in soup.find('form').findAll('input')] return form_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input_names(self):\n return self._input_names", "def search_form(self, name_filter):\n from sagas.ofbiz.forms import print_form_list\n print_form_list(name_filter=name_filter)", "def get_input_names(self):\n inputNames = []\n for inVar in self.inputs:\n # inVar is of type InOutVar and the object that it contains is a PyFMI variable\n inputNames.append(inVar.get_object().name)\n return inputNames", "def get_input_names():\n names = [device.name for device in get_devices() if device.is_input]\n return list(sorted(names))", "def input_names(self):\n raise NotImplementedError(\n 'Derived ExternalGreyBoxModel classes need to implement the method: input_names'\n )", "def get_renamed_input_fields(self):\n return self.renamed_input_fields", "def input_names(self) -> List[Union[str, int]]:\n return [x.name or i for i, x in enumerate(self.inputs)]", "def _get_control_names(self, interface, form):\n return sorted([control.name\n for control in form.controls\n if interface.providedBy(control)])", "def get_input_var_names(self):\n return self._input_var_names", "def input_fields(self):\r\n return self.input.fields", "def names(self):\r\n return self.get_field(self.name_field)", "def searchFields(self):\n\n keyword = self.lineEdit.text().strip()\n self.options = []\n for field in self.all_fields:\n if keyword.lower() in field.lower(): # to make search case insensitive\n self.options.append(field)\n # Error dialog for invalid entry\n if len(self.options) == 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"No field found containing keyword!\")\n msg.setInformativeText(\"Enter valid attribute\")\n msg.setWindowTitle(\"Error\")\n msg.show()\n msg.exec_()\n else:\n self.populateList()", "def field_names(self):\n\n entry_time_name = forms_builder.forms.models.FormEntry._meta.get_field('entry_time').verbose_name.title()\n document_title_name = Document._meta.get_field('name').verbose_name.title()\n document_url_name = Document._meta.get_field('url').verbose_name.title()\n\n form = self.form.all()[0]\n return ['user'] \\\n + [document_title_name, document_url_name] \\\n + [f.label\n for f in form.fields.all()] \\\n + [entry_time_name]", "def __find_username_field_via_name(self, inputs):\n for input in inputs:\n for n in ('name', 'login', 'user', 'mail'):\n if n in input.attrs['name'].lower():\n return input.attrs['name']\n return None", "def input_keys(self) -> List[str]:\n return [self.input_url_key, self.input_browser_content_key]", "def submit_control_names_all_forms(self):\n forms = [self.getForm(index=index)\n for index, _ in enumerate(self._getAllResponseForms())]\n names = [\n self._get_control_names(\n zope.testbrowser.interfaces.ISubmitControl, x)\n for x in forms]\n return names", "def search():\n\n # POST\n if request.method == \"POST\":\n\n # validate form submission\n if not request.form.get(\"intervention\"):\n return render_template(\"results.html\", results=entries.values())\n ''' \n elif not request.form.get(\"setting\"):\n return apology(\"missing setting\")\n elif not request.form.get(\"emrpref\"):\n return apology(\"missing emr pref\")\n elif not request.form.get(\"budget\"):\n return apology(\"missing budget\")'''\n \n results = []\n for k in entries:\n print('entries', entries[k]['Keywords'])\n print('term', request.form.get(\"intervention\"))\n if request.form.get(\"intervention\") in entries[k]['Keywords']:\n print('ya')\n results.append(entries[k])\n\n\n return render_template(\"results.html\", results=results)\n\n\n # GET\n else:\n return 
render_template(\"search.html\")", "def name(self): \n return \"search_form\"", "def get_search_results(self):\n return self.get_list_of_names(self.SEARCH_RESULTS)", "def forms(self):\n filter = self.filter_form\n search = self.search_form\n return {\n 'filter': filter,\n 'search': search\n }", "def get_search_terms(self):\n params = self.request.QUERY_PARAMS.get(\"search\", \"\")\n return params.replace(\",\", \" \").split()", "def get_all_forms_sqli(url):\n soup = bs(requests.get(url).content, \"html.parser\")\n return soup.find_all(\"form\"),soup.find_all(\"input\")", "def required_input_names(self) -> List[Union[str, int]]:\n return [x.name or i for i, x in enumerate(self.inputs) if not x.optional]", "def get_inputs():\n inputs = {}\n for obj in vars(acsploit.input).values():\n if hasattr(obj, 'INPUT_NAME'):\n inputs[obj.INPUT_NAME] = obj\n\n return inputs", "def get_search_tag_names(self):\n return self._ruleset.keys()", "def perform_search(self):\n\n self.implicitly_wait(5)\n html_element = self.find_element_by_xpath(\n '/html/body').get_attribute('outerHTML')\n soup = Scraper(html_element)\n target = soup.find_search_field()\n\n for elem in target:\n for attr, value in elem.items():\n placeholder = self.find_elements_by_css_selector(\n f'input[{attr}=\"{value}\"]'\n )\n for element in placeholder:\n try:\n element.send_keys(self.keywords)\n element.send_keys(Keys.RETURN)\n print(colored(':: Placeholder fullfilled ::', 'green'))\n return\n except:\n print(\n colored('Can\\'t type inside the search input', 'yellow'))", "def get_input_descriptor_names(self):\n\n return ['input']", "def names(filter=None):", "def nameset(request, model):\n if 'name' not in request.GET:\n return model.objects.all()\n\n if 'name' in request.GET:\n namestr = request.GET['name']\n if ',' in namestr:\n return model.objects.filter(name__in=namestr)\n else:\n return model.objects.filter(name__icontains=namestr)", "def search():\n # Check for database tables\n check_db()\n # Check for GET data\n search_query = request.args.get(\"q\", None)\n # Format search results as HTML\n search_results = get_search_results_html(search_query)\n # Format recent searches as HTML\n recent_searches = get_recent_searches_html()\n\n return html_wrapper('<h1>' + SITE_NAME + '''</h1>\n <form action=\"/\" method=\"GET\">\n <input type=\"text\" name=\"q\">\n <input type=\"submit\" value=\"search\">\n </form>''' + search_results + recent_searches)" ]
[ "0.6950789", "0.67164797", "0.6396843", "0.630177", "0.62223536", "0.6088735", "0.6041806", "0.60160005", "0.60123277", "0.600498", "0.59274644", "0.5919662", "0.58825165", "0.58457726", "0.5825849", "0.5805771", "0.5787204", "0.5785471", "0.5738464", "0.5688952", "0.56775737", "0.56447977", "0.5601297", "0.55931926", "0.5589371", "0.55724925", "0.555142", "0.5549188", "0.5536542", "0.55356807" ]
0.75881535
0
Add required token from cookie to header
def __header_update_token(self) -> None:
    cookies = self.session.cookies.get_dict()
    self.session.headers.update({
        'Referer': 'https://efdsearch.senate.gov/search/',
        'X-CSRFToken': cookies['csrftoken'],
    })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def header_token(token):\n return {'Authorization': '{0} {1}'.format('JWT', token)}", "def _token_header(token=None):\n if not token:\n return None\n\n message = '{token}:Ignored'.format(token=token)\n headers = {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}\n return headers", "def get_token_header(cls, token):\n if token is EMPTY_KNOX_TOKEN:\n return {}\n else:\n return {'HTTP_AUTHORIZATION': 'token {}'.format(token)}", "def add_header(response):\n response.headers['Authorization'] = response\n return response", "def token_header(token):\n message = '{token}:ignored'.format(token=token)\n return {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}", "def add_auth_token(self):\n auth_token = json.loads(os.getenv('AUTH_TOKEN'))\n self.driver.add_cookie(auth_token)", "def _make_header(self, token):\n header = HEADER.copy()\n header['Authorization'] = \"Bearer {}\".format(token)\n\n return header", "def get_header( self ):\n\t\tkey = self.key\n\t\tvalue = self.value\n\t\tpath = self.path\n\t\texpires = self.expires.strftime( \"%a, %d-%m-%y %H:%M:%S GMT\" )\n\t\treturn ( \"Set-Cookie\", \"%(key)s=%(value)s; Path=%(path)s; Expires=%(expires)s;\" % locals() )", "def init_headers(token):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + token\n }\n return headers", "def bearer_authentication(self, token: str) -> None:\n self.api_session.headers.update({'Authorization': f'Bearer {token}'})", "def updateHeader(URL): # (header, key):\r\n cookies.update(session.cookies)\r\n header['Cookie'] = '; '.join([str(key)+'='+str(value) for key, value in session.cookies.items()])\r\n # print(header['Cookie'])\r\n header['Referer'] = URL", "def __call__(self, r):\n if (self.token):\n r.headers['access-token'] = self.token\n return r", "def process_jwt_token(response):\n if response.status_code == HTTPStatus.OK and current_identity:\n response.headers['new_jwt'] = '{0}'.format(\n str(__encode_token().decode('utf-8'))\n )\n\n return response", "def __call__(self, r):\n r.headers[\"x-aims-auth-token\"] = self._token\n return r", "def __call__(self, request):\n request.headers['Authorization'] = f'Token {self.token}'\n return request", "def capture_token(self) -> None:\n auth_cookie = self.driver.get_cookies()[1]\n if 'expiry' in auth_cookie:\n del auth_cookie['expiry']\n\n json_cookie = json.dumps(auth_cookie)\n\n os.environ['AUTH_TOKEN'] = json_cookie", "def build_header(token: str = None):\n return {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token or get_project_token(),\n }", "def add_token_to_response(response, csrf_token=None):\n if csrf_token:\n # Don't set httponly so that we can POST using XHR.\n # https://github.com/gratipay/gratipay.com/issues/3030\n response.set_cookie(b'csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False)\n\n # Content varies with the CSRF cookie, so set the Vary header.\n patch_vary_headers(response, ('Cookie',))", "def _make_header(self, token: str) -> dict:\n\n header = HEADER.copy()\n # modify to represent how to build the header\n header['Authorization'] = f\"Bearer {token}\"\n\n return header", "def __init__(self, request, field='staff_token'):\n self.request = request\n self.token_input = request.cookies.get(field)", "def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}", "def auth_headers(current_user_token: str) -> Dict[str, str]:\n return {\"Authorization\": f\"Bearer {current_user_token}\"}", "def _get_authorization_header(self):\n 
return f\"token {self._context.get_github_token()}\"", "def extract_token_from_cookie(request):\n try:\n token = request.headers.cookie['csrf_token'].value\n except KeyError:\n token = None\n else:\n token = _sanitize_token(token)\n\n # Don't set a CSRF cookie on assets, to avoid busting the cache due to the\n # Vary header we set below. Don't set it on callbacks, because we use IP\n # filtering there.\n\n if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):\n token = None\n else:\n token = token or _get_new_token()\n\n return {'csrf_token': token}", "def remember(self, response, request, identity):\n extra_claims = identity.as_dict()\n userid = extra_claims.pop('userid')\n claims_set = self.create_claims_set(userid, extra_claims)\n token = self.encode_jwt(claims_set)\n response.headers['Authorization'] = '%s %s' % (self.auth_header_prefix, token)", "def add_auth_to_headers(self):\n if not hasattr(self, \"headers\"):\n self.headers = {\"Content-Type\": \"application/json\"}\n\n login = {\"account_number\": self.account[\"account_number\"],\n \"pin\": self.account[\"pin\"]}\n token = json.loads(self.client.post(\n \"/accounts/login\",\n data=json.dumps(login),\n headers=self.headers).get_data())[\"token\"]\n self.headers[\"Authorization\"] = \"Bearer \" + token", "def auth_header_value(self):\n return f\"token {self.API_TOKEN}\"", "def _addAuthenticationToRequestHeader(request, client):\n request.addAuthorization(client.id, client.secret)", "def _get_token(self): # pragma: no cover\n\n tokenCookie = None\n for cookie in self._session.cookies:\n if \"mast_token\" in cookie.name:\n tokenCookie = cookie\n break\n\n if not tokenCookie:\n warnings.warn(\"No auth token found.\", AuthenticationWarning)\n\n return tokenCookie", "def create_auth_header(api_token):\n return {'Authorization': f'token {api_token}'}" ]
[ "0.7128207", "0.700346", "0.68207276", "0.66793406", "0.66069144", "0.64189374", "0.6404582", "0.639439", "0.63904995", "0.63799286", "0.63683873", "0.62969685", "0.6278751", "0.6263158", "0.622593", "0.62056905", "0.6190562", "0.6187023", "0.6172328", "0.6169181", "0.6153334", "0.6107103", "0.60933924", "0.6084723", "0.6074833", "0.6072206", "0.60687387", "0.6057734", "0.6050651", "0.60366017" ]
0.7365751
0
Create Google Drive service using provided Flow flags and OAuth keys
def create_service(flags, client_id, client_secret):
    flow = OAuth2WebServerFlow(
        client_id=client_id,
        client_secret=client_secret,
        scope='https://www.googleapis.com/auth/drive.readonly',
        redirect_uri='http://localhost')
    storage = Storage('oauth_storage')
    credentials = tools.run_flow(flow, storage, flags)
    http = credentials.authorize(httplib2.Http())
    return build('drive', 'v2', http=http)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_service():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=9797)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def build_service():\n creds = None\n\n # the file token.json stores the user's access and refresh tokens, and is \n # created automatically when the authorization flow completes for the first time\n \n if os.path.exists('../creds/token.json'):\n creds = Credentials.from_authorized_user_file('../creds/token.json', SCOPES)\n\n # if there are no (valid) credentials, ask the user to login\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n '../creds/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n with open('../creds/token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def create_google_drive_service(self):\n credentials = self.get_credentials()\n http = credentials.authorize(httplib2.Http())\n return discovery.build('drive', 'v3', http=http)", "def gdrive_service(secrets: Dict):\n return build(\n \"drive\", \"v3\", credentials=google_credentials(secrets), cache_discovery=False\n )", "def init_api(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(self.gdrive_config.TOKEN_PICK_PATH):\n with open(self.gdrive_config.TOKEN_PICK_PATH, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.gdrive_config.CREDENTIAL_PATH, self.gdrive_config.SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(self.gdrive_config.TOKEN_PICK_PATH, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def __init__(self, credentials):\n self.credentials = credentials\n http = httplib2.Http()\n http = self.credentials.authorize(http)\n self.service = build(\"drive\", \"v2\", http=http)", "def drive_service() -> object:\n g_drive_service = build('drive', 'v3', credentials=google_creds())\n\n return g_drive_service", "def create_service():\n creds = None\n # The file token_sheet.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token_sheet.pickle'):\n with open('token_sheet.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials 
available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials_sheets.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token_sheet.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n return service", "def build_service():\n\n\tstore = file.Storage('credentials.json')\n\tcreds = store.get()\n\tif not creds or creds.invalid:\n\t flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n\t creds = tools.run_flow(flow, store)\n\tservice = build('gmail', 'v1', http=creds.authorize(Http(disable_ssl_certificate_validation=True)))\n\treturn service", "def build_service():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n f\"{EMAIL_ACCOUNT_FILE}\", SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('gmail', 'v1', credentials=creds)\r\n return service", "def initialize_drive():\n credentials_drive = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, \n SCOPE\n )\n return gspread.authorize(credentials_drive)", "def __init__(self, credentials):\n http = credentials.authorize(httplib2.Http())\n self.service = googleapiclient.discovery.build(\"drive\", \"v2\", http=http)", "def authorize(config, flags):\n try:\n credentials = client.GoogleCredentials.get_application_default()\n print('Using application default credentials.')\n return credentials.create_scoped(_constants.API_SCOPE)\n except client.ApplicationDefaultCredentialsError:\n pass # Can safely ignore this error, since it just means none were found.\n if os.path.isfile(_constants.SERVICE_ACCOUNT_FILE):\n print('Using service account credentials from %s.' %\n _constants.SERVICE_ACCOUNT_FILE)\n return ServiceAccountCredentials.from_json_keyfile_name(\n _constants.SERVICE_ACCOUNT_FILE,\n scopes=_constants.API_SCOPE)\n elif os.path.isfile(_constants.CLIENT_SECRETS_FILE):\n print('Using OAuth2 client secrets from %s.' %\n _constants.CLIENT_SECRETS_FILE)\n message = tools.message_if_missing(_constants.CLIENT_SECRETS_FILE)\n storage = token_storage.Storage(config)\n credentials = storage.get()\n if credentials is not None and not credentials.invalid:\n return credentials\n message = tools.message_if_missing(_constants.CLIENT_SECRETS_FILE)\n flow = client.flow_from_clientsecrets(\n _constants.CLIENT_SECRETS_FILE,\n scope=_constants.API_SCOPE,\n message=message,\n login_hint=config['emailAddress'])\n return tools.run_flow(flow, storage, flags)\n print('No OAuth2 authentication files found. 
Checked:', file=sys.stderr)\n print('- Google Application Default Credentials', file=sys.stderr)\n print('- %s' % _constants.SERVICE_ACCOUNT_FILE, file=sys.stderr)\n print('- %s' % _constants.CLIENT_SECRETS_FILE, file=sys.stderr)\n print('Please read the accompanying documentation.', file=sys.stderr)\n sys.exit(1)\n return None", "def init(api_name, api_version, api_settings_dict, discovery_filename=None):\n\n # Set logging levels so we don't log stuff that doesn't really matter\n logging.getLogger(\"googleapiclient.discovery_cache\").setLevel(logging.ERROR)\n logging.getLogger(\"googleapiclient.discovery\").setLevel(logging.WARNING)\n\n # Import libraries from oauth2client\n try:\n from oauth2client import client\n from oauth2client import file\n from oauth2client import tools\n except ImportError:\n raise ImportError(\n 'GoogleApi requires oauth2client. Please install oauth2client and try again.')\n\n # Set the Google API scope\n scope = 'https://www.googleapis.com/auth/' + api_name\n\n # Parser command-line arguments.\n parent_parsers = [tools.argparser]\n parent_parsers.extend([])\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args([])\n\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the API Access tab on the Google APIs\n # Console <http://code.google.com/apis/console>.\n client_secrets = os.path.join(os.path.dirname(__file__),\n api_settings_dict['client_secrets_file'])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(client_secrets,\n scope=scope,\n message=tools.message_if_missing(client_secrets))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. 
The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(api_name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=build_http())\n\n if discovery_filename is None:\n # Construct a service object via the discovery service.\n # print('Constructing a service object via the discovery service.')\n service = discovery.build(api_name, api_version, http=http)\n else:\n # Construct a service object using a local discovery document file.\n with open(discovery_filename) as discovery_file:\n service = discovery.build_from_document(\n discovery_file.read(),\n base='https://www.googleapis.com/',\n http=http)\n return service", "def main():\r\n creds = None\r\n # The file token.json stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('cal_token.json'):\r\n creds = Credentials.from_authorized_user_file('cal_token.json', SCOPES)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'client_secret.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('cal_token.json', 'w') as token:\r\n token.write(creds.to_json())\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n return service", "def get_service(credentials_folder, version='v3'):\n credentials = get_credentials(credentials_folder)\n http = credentials.authorize(httplib2.Http(cache=\".cache\"))\n service = discovery.build('drive', version, http=http)\n return service", "def authenticate_google():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n return service", "def get_service():\n creds = None\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n\n service = build('gmail', 'v1', 
credentials=creds)\n\n return service", "def init(argv, doc, filename, parents=None):\n service = None\n flags = None\n parent_parsers = [tools.argparser, argparser]\n if parents is not None:\n parent_parsers.extend(parents)\n\n parser = argparse.ArgumentParser(\n description=doc,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args(argv[1:])\n\n auth_path = os.path.dirname(filename)\n client_secrets_path = os.path.join(auth_path, CLIENT_SECRETS_FILE)\n service_account_path = os.path.join(auth_path, SERVICE_ACCOUNT_FILE)\n\n credentials = None\n if os.path.isfile(service_account_path):\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n service_account_path,\n scopes=API_SCOPE)\n elif os.path.isfile(client_secrets_path):\n message = tools.message_if_missing(client_secrets_path)\n flow = client.flow_from_clientsecrets(client_secrets_path,\n scope=API_SCOPE,\n message=message)\n storage_path = os.path.join(auth_path, SERVICE_NAME + '.dat')\n storage = Storage(storage_path)\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n else:\n print('No OAuth2 authentication files found. Checked:', file=sys.stderr)\n print('- %s' % service_account_path, file=sys.stderr)\n print('- %s' % client_secrets_path, file=sys.stderr)\n print('Please read the accompanying documentation.', file=sys.stderr)\n sys.exit(1)\n\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(SERVICE_NAME, SERVICE_VERSION, http=http)\n return (service, flags)", "def google_drive_authenticate(self):", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n\n print()\n\n # look for a specific folder and get its id\n\n page_token = None\n folder_name = \"Teaching!\"\n folder_id = None\n while True:\n response = service.files().list(\n q=\"mimeType='application/vnd.google-apps.folder' and name = '\" + folder_name + \"'\",\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n for file in response.get('files', []):\n # Process change\n folder_id = file.get('id')\n print('Found folder: %s (%s)' % (file.get('name'), file.get('id')))\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n print()\n print()\n\n # check if file with same name already exists\n file_to_upload_path = 'samplefiles/sky.jpg'\n name_of_uploaded_file = 'sky.jpg'\n response = service.files().list(\n q=\"trashed = false and name = '\" + name_of_uploaded_file + \"' and parents in '\" + folder_id + \"'\",\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n files = response.get('files', [])\n if files:\n print(\"File with name {0} in {1} already 
exists!\".format(name_of_uploaded_file, folder_name))\n print('File info: %s (%s)' % (files[0].get('name'), files[0].get('id')))\n else:\n print(\"File with name {0} does not exist in {1}.\".format(name_of_uploaded_file, folder_name))\n\n # do the upload\n print()\n print(\"Uploading file with name {0} to folder {1}\".format(name_of_uploaded_file, folder_name))\n file_metadata = {'name': name_of_uploaded_file, 'parents': [folder_id]}\n manager = multiprocessing.Manager() # need these lines because we have a return value we care about\n return_dict = manager.dict()\n\n p = multiprocessing.Process(target=upload_file, args=(service, file_metadata, file_to_upload_path, return_dict))\n p.start()\n p.join(10)\n if p.is_alive():\n print(\"still running... let's kill it\")\n p.kill()\n p.join()\n file = return_dict['file']\n print()\n if file.get('id'):\n print('File uploaded successfully. File ID: %s' % file.get('id'))\n else:\n print('File failed to upload!')\n\n print()\n print()", "def getAPIservice(args, name, version, client_secrets_file, scope=None, parents=[], discovery_filename=None):\n if scope is None:\n scope = 'https://www.googleapis.com/auth/' + name\n\n # Parser command-line arguments.\n parent_parsers = [tools.argparser]\n parent_parsers.extend(parents)\n parser = argparse.ArgumentParser(\n description=\"Google API v3 Service Provider\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args(args)\n print(\"args = %s\" % (args))\n\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the API Access tab on the Google APIs\n # Console <http://code.google.com/apis/console>.\n # client_secrets = os.path.join(os.path.dirname(filename),\n # 'client_secrets.json')\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(client_secrets_file,\n scope=scope,\n message=tools.message_if_missing(client_secrets_file))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. 
The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http = httplib2.Http())\n\n if discovery_filename is None:\n # Construct a service object via the discovery service.\n service = discovery.build(name, version, http=http)\n else:\n # Construct a service object using a local discovery document file.\n with open(discovery_filename) as discovery_file:\n service = discovery.build_from_document(\n discovery_file.read(),\n base='https://www.googleapis.com/',\n http=http)\n return (service, flags)", "def get_googleapiclient(config, project, ns, v):\n cred = Credentials.from_service_account_info(\n config[project][\"service_account\"],\n scopes=[\"https://www.googleapis.com/auth/drive\"],\n )\n return build(ns, v, credentials=cred)", "def service_authentication():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n return service", "def auth(scope='https://mail.google.com', file_name='credentials.json', svc='gmail', version='v1'):\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(fn, scope)\n creds = tools.run_flow(flow, store)\n service = build(svc, version, http=creds.authorize(Http()))\n return service", "def get_service():\n \n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n token_path = f\"{sys.path[0]}/creds/token.pickle\"\n if os.path.exists(token_path):\n with open(token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n cred_path = f\"{sys.path[0]}/creds/credentials.json\"\n flow = InstalledAppFlow.from_client_secrets_file(\n cred_path, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(token_path, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n return service", "def get_service(credentials):\n try:\n creds = service_account.Credentials.from_service_account_file(\n credentials, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=creds)\n drive_service = build('drive', 'v3', credentials=creds)\n return service, drive_service\n except Exception as e:\n print(f'Error accessing Google Drive with service account '\n f'{credentials}')\n raise(e)", "def get_drive_services(username):\n g_creds = 
generate_google_token_from_db(user=username) # google drive creds\n d_creds = generate_dropbox_token_from_db(user=username) # dropbox creds\n # building google drive service\n g_service = build('drive', 'v3', credentials=g_creds)\n # building dropbox service \n d_service = dropbox.Dropbox(app_key=dropbox_app_key, \n oauth2_access_token=d_creds['oauth2_access_token'],\n oauth2_refresh_token=d_creds['oauth2_refresh_token'], \n app_secret=dropbox_app_secret) \n d_service.check_and_refresh_access_token()\n return g_service, d_service", "def authorize_api(self):\n\n log.debug('computing Google authentification process for \"{}\"'.format(self.school_year))\n flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, SCOPE)\n storage = Storage('credentials.dat')\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, tools.argparser.parse_args())\n\n # Create an httplib2.Http object to handle our HTTP requests, and authorize it\n # using the credentials.authorize() function.\n http = httplib2.Http()\n http = credentials.authorize(http)\n httplib2.debuglevel = 0\n\n return build('calendar', 'v3', http=http)", "def __init__ (self, email, domain, password):\n\n self.gd_client = gdata.apps.service.AppsService()\n self.gd_client.email = email\n self.gd_client.domain = domain\n self.gd_client.password = password\n self.gd_client.ProgrammaticLogin()" ]
[ "0.7701686", "0.7527261", "0.7475176", "0.7074418", "0.69454056", "0.6692553", "0.6619492", "0.66136366", "0.6600691", "0.65582865", "0.65500313", "0.6303524", "0.6166362", "0.61439687", "0.6088845", "0.60803515", "0.6030853", "0.60077214", "0.6000676", "0.59963655", "0.599028", "0.5929376", "0.58383435", "0.5787829", "0.57851475", "0.5781902", "0.5770917", "0.57631075", "0.5759345", "0.57314306" ]
0.80562335
0
Download all files from provided list
def download_files(service, file_list, out_path):
    total = len(file_list)
    for i, file_id in enumerate(file_list, 1):
        name = get_file(service, file_id)['title']
        print('Downloading {}... ({}/{}) [{}%]'.format(name, i, total, round(i / total * 100)))
        path = os.path.join(out_path, name)
        try:
            download_file(service, file_id, path)
        except errors.HttpError as error:
            os.remove(path)  # Remove broken file
            print('Could not download file: {}'.format(error), file=sys.stderr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def download_files(self):", "def multi_download(self, url_list):\n workers = 4\n with ThreadPoolExecutor(workers) as ex:\n urls = [url_list[x] for x in range(len(url_list))]\n self.filenames = [str(y)+\".txt\" for y in range(len(url_list))]\n ex.map(self.download, urls, self.filenames)\n return self.filenames", "def download_all(self):\r\n # Fetch website list\r\n self.fetch_website_list()\r\n\r\n for website in self.website_list:\r\n self.download(website['id'])", "def download_list(urls, outdir=None, workdir=None, threads=3):\n pool = ThreadPool(threads)\n download_lambda = lambda x: download(x, outfile=outdir, workdir=workdir)\n pool.map(download_lambda, urls)", "def download(all):\n print(\"Downloading\")", "def download_all(): #@save\n for name in DATA_HUB:\n download(name)", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def download(urls: List[str], num_threads: int = 40) -> List[str]:\n\n num_files = len(urls)\n start = perf_counter()\n\n print(\"Starting download of %s files . . .\" % num_files)\n\n results = multiprocess(urls, Downloader, num_threads=num_threads)\n\n dur = perf_counter() - start\n print(\"Completed download of %s files after %.3f seconds.\" % (num_files, dur))\n\n return results", "def _download_files():\n import urllib\n from os import makedirs\n from os.path import exists, join\n \n atom_list = ['Ruthenium', 'Rhenium', 'Rutherfordium', 'Radium', 'Rubidium',\n 'Radon', 'Rhodium', 'Beryllium', 'Barium', 'Bohrium', 'Bismuth',\n 'Berkelium', 'Bromine', 'Hydrogen', 'Phosphorus', 'Osmium', 'Mercury',\n 'Germanium', 'Gadolinium', 'Gallium', 'Ununbium', 'Praseodymium',\n 'Platinum', 'Plutonium', 'Carbon', 'Lead', 'Protactinium', 'Palladium',\n 'Xenon', 'Polonium', 'Promethium', 'Hassium',\n 'Holmium', 'Hafnium', 'Molybdenum', 'Helium', 'Mendelevium', 'Magnesium',\n 'Potassium', 'Manganese', 'Oxygen', 'Meitnerium', 'Sulfur', 'Tungsten',\n 'Zinc', 'Europium', 'Einsteinium', 'Erbium', 'Nickel', 'Nobelium',\n 'Sodium', 'Niobium', 'Neodymium', 'Neon', 'Neptunium', 'Francium', 'Iron',\n 'Fermium', 'Boron', 'Fluorine', 'Strontium', 'Nitrogen', 'Krypton',\n 'Silicon', 'Tin', 'Samarium', 'Vanadium', 'Scandium', 'Antimony',\n 'Seaborgium', 'Selenium', 'Cobalt', 'Curium', 'Chlorine', 'Calcium',\n 'Californium', 'Cerium', 'Cadmium', 'Thulium', 'Caesium', 'Chromium',\n 'Copper', 'Lanthanum', 'Lithium', 'Thallium', 'Lutetium', 'Lawrencium',\n 'Thorium', 'Titanium', 'Tellurium', 'Terbium', 'Technetium', 'Tantalum',\n 'Ytterbium', 'Dubnium', 'Zirconium', 'Dysprosium', 'Iodine', 'Uranium',\n 'Yttrium', 'Actinium', 'Silver', 'Iridium', 'Americium', 'Aluminium',\n 'Arsenic', 'Argon', 'Gold', 'Astatine', 'Indium', 'Darmstadtium', 'Copernicium']\n\n if not exists(\"elements\"): makedirs(\"elements\")\n for name in atom_list: \n file = urllib.urlopen(\"http://www.webelements.com/{0}\".format(name.lower()))\n string = file.read()\n file.close()\n with open(join(\"elements\", name), \"w\") as out: out.write(string)\n file = urllib.urlopen(\"http://www.webelements.com/{0}/atoms.html\".format(name.lower()))\n string = file.read()\n file.close()\n with open(join(\"elements\", name + \"_atoms.html\"), \"w\") as out: out.write(string)\n file = urllib.urlopen( 
\"http://www.webelements.com/{0}/electronegativity.html\"\\\n .format(name.lower()))\n string = file.read()\n file.close()\n with open(join(\"elements\", name + \"_electronegativity.html\"), \"w\") as out: out.write(string)\n file = urllib.urlopen( \"http://www.webelements.com/{0}/atom_sizes.html\"\\\n .format(name.lower()))\n string = file.read()\n file.close()\n with open(join(\"elements\", name + \"_atom_sizes.html\"), \"w\") as out: out.write(string)\n file = urllib.urlopen( \"http://www.webelements.com/{0}/thermochemistry.html\"\\\n .format(name.lower()))\n string = file.read()\n file.close()\n with open(join(\"elements\", name + \"_thermochemistry.html\"), \"w\") as out: out.write(string)\n file = urllib.urlopen( \"http://www.webelements.com/{0}/physics.html\"\\\n .format(name.lower()))\n string = file.read()\n file.close()\n with open(join(\"elements\", name + \"_physics.html\"), \"w\") as out: out.write(string)", "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def download_all(self, to: str = None) -> Generator:\n\n for filename in self.list_files():\n yield (self.download(filename, to))", "def _download_all(update_path=True, verbose=None):\n\n # iterate over dataset\n for ds in dataset_list:\n # call download\n ds().download(update_path=True, verbose=verbose, accept=True)", "def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)", "def download_packages(list, failfast=False):\n return _librepo.download_packages(list, failfast)", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])", "def get_files_to_download(self):\n\n self.logger.logMsg(\"Getting Files to Download\")\n\n download_links = []\n try:\n with open(self.main_xml) as xml_file:\n data_dict = xmltodict.parse(xml_file.read())\n\n xml_file.close()\n\n for docs in data_dict.get('response').get('result').get('doc'):\n for doc in docs.get('str'):\n\n if doc.get('@name') == 'download_link':\n link = doc.get('#text', None)\n if link is not None:\n download_links.append(link)\n except Exception as e:\n self.logger.logMsg(\"Error Getting Files to Download {}\".format(str(e)))\n raise Exception('Error in Getting Files For Download')\n\n self.logger.logMsg(\"Finished Getting Files to Download\")\n\n return download_links", "def download(urls, dest_folder):\n pass", "def download(self, destination: str = None) -> list:\n return [f.download(destination=destination) for f in self.files]", "def fetch_files_from_urls(urls, dir):\n makedir(dir)\n try:\n pool = []\n for url in urls:\n p = Process(target=download, 
args=(url, dir,))\n p.start()\n pool.append(p)\n for p in pool:\n p.join()\n except KeyboardInterrupt:\n print \"Shutdown requested...exiting\"\n # except Exception:\n # traceback.print_exc(file=sys.stdout)\n\n # print(\"removing temporary files from current directory\")\n map(os.remove, glob.glob(\"*.tmp\"))", "def get_downloadable_data(url_list):\n downloadable_data_list = []\n for url in url_list:\n soup = visit_homepage(url)\n for link in soup.find_all(class_='resource-url-analytics'):\n downloadable_data_list.append(link['href'])\n return downloadable_data_list", "def download_and_unzip(self, file_list):\n self.logger.logMsg(\"Starting Download and unzip files\")\n rtn = True\n if not len(file_list):\n self.logger.logError('Nothing to Download Return ')\n raise Exception('Nothing to Download')\n else:\n for file in file_list:\n try:\n self.logger.logMsg(\"Downloading {}\".format(file))\n with urlopen(file) as zipresp:\n with ZipFile(BytesIO(zipresp.read())) as zfile:\n zfile.extractall(self.download_path)\n except Exception as e:\n self.logger.logError(\"Error {} Downloading/Unzipping {}\".format(str(e), file))\n rtn = False\n if not rtn:\n self.logger.logError(\"Error Download and unzip files\")\n raise Exception('Failed to Download/Unzip one or More Files')\n\n self.logger.logMsg(\"Finished Download and unzip files\")", "def download_all_files(self):\n self.server_handler.get_sensor_data_from_server()", "def run(self):\n urls_to_download = self._get_links()\n results = ThreadPool(8).imap_unordered(self._download_url, urls_to_download)\n for path in results:\n print(path)", "def download_all_files(self, root_url, version):\n file_list = self._http_client.get(root_url + '?ref=refs/tags/' + version)\n for file in file_list.json():\n if file['type'] == 'file':\n download_url = file['download_url']\n download_path = self.get_module_and_path('next/' + file['path'].replace(self._main_dir + '/', ''))\n self.download_file(download_url.replace('refs/tags/', ''), download_path)\n elif file['type'] == 'dir':\n path = self.get_module_and_path('next/' + file['path'].replace(self._main_dir + '/', ''))\n os.mkdir(path)\n self.download_all_files(root_url + '/' + file['name'], version) # Recurse into the subdirectory.\n\n file_list.close()", "def download_file_list(self, limit=None, test_page='https://www.google.com'):\n # test csv file parsing\n if self.file_list is None:\n raise NoFileListProvided()\n\n # test Internet connection\n try:\n urllib.request.urlopen(test_page, timeout=2)\n except urllib.request.URLError:\n raise InternetConnectionIssue()\n except:\n raise UnknownError()\n\n # determine whether the number of file to be downloaded is capped for test purposes\n if limit is None:\n total_file_num = self.file_num\n else:\n total_file_num = limit\n print('Total number of files to be downloaded: ' + str(total_file_num))\n\n # perform downloading\n print(\"Downloading MODIS data...\")\n for row in tqdm(range(total_file_num)):\n download_dir = self.file_list['download_dir'].iloc[row]\n file_name = self.file_list['file_name'].iloc[row]\n online_url = self.file_list['online_url'].iloc[row]\n\n # create local sub-directories\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n\n # check local file existence\n # CAUTION: the existence of local files, even incomplete, will preemptively stop the downloading process\n if os.path.isfile(os.path.join(download_dir, file_name)):\n self.file_list.set_value(index=row, col='status', value=1)\n else:\n try:\n HTTPresponse = 
urllib.request.urlretrieve(online_url, os.path.join(download_dir, file_name))\n # check remote file existence\n if 'Content-Type: application/x-hdf' in HTTPresponse[1].__str__():\n self.file_list.set_value(index=row, col='status', value=1)\n elif 'Content-Type: text/html' in HTTPresponse[1].__str__():\n os.remove(os.path.join(download_dir, file_name))\n raise RemoteFileDoesntExist()\n else:\n os.remove(os.path.join(download_dir, file_name))\n raise UnknownError()\n except RemoteFileDoesntExist:\n self.file_list.set_value(index=row, col='status', value=0)\n except:\n os.remove(os.path.join(download_dir, file_name))\n self.file_list.set_value(index=row, col='status', value=0)\n raise UnknownError()", "def download_files(valid_links: list) -> list:\n print('Starting process...')\n print('')\n\n year_month_filepath = []\n\n for link_info in valid_links:\n\n # Get file extension\n extension = link_info[0].split('.')[-1]\n\n # Link to download\n link_to_download = link_info[0]\n\n # Get month\n month = link_info[1]\n\n # Get year\n year = link_info[2]\n\n # Create a standard filename to save\n file_name = f'{year}-{month}.{extension}'\n\n print(f'Downloading... {link_to_download} Saving... {file_name}')\n\n # Create a link to save into ./file directory\n link_to_save = f'./file/{file_name}'\n\n # Download file and save it\n wget.download(link_to_download, out=link_to_save)\n\n\n # Special treatment to zip and xlsx file\n if extension == 'zip':\n\n # Get right link to save (.csv) from zip function\n link_to_save = get_file_into_zip(link_to_save)\n\n elif extension == 'xlsx':\n # Get right link to save (.csv) from xlsx function\n link_to_save = excel2csv(link_to_save)\n\n # Include the tuple into a list\n year_month_filepath.append((year, month, link_to_save))\n\n print('Finishing process...')\n\n return year_month_filepath", "def download_googledrive(file_list=[ { \"fileid\": \"1-K72L8aQPsl2qt_uBF-kzbai3TYG6Qg4\", \"path_target\": \"data/input/download/test.json\"}], **kw):\n try :\n import gdown\n except:\n os.system('pip install gdown')\n import gdown\n import random\n target_list = []\n\n for d in file_list :\n fileid = d[\"fileid\"]\n target = d.get(\"path_target\", \"data/input/adonwload/googlefile_\" + str(random.randrange(1000) ) )\n\n os.makedirs(os.path.dirname(target), exist_ok=True)\n\n url = f'https://drive.google.com/uc?id={fileid}'\n gdown.download(url, target, quiet=False)\n target_list.append( target )\n\n return target_list", "def ssh_download_files(data):\n with _ssh_connect() as ssh:\n with ssh.open_sftp() as sftp:\n with click.progressbar(data, label='downloads') as items: # noqa\n for item in items:\n _, filename = os.path.split(item)\n sftp.get(item, f'{DOWNLOAD_DIR}/{filename}')", "def kegg_download_manager_synchronous(list_of_ids, wait=1):\n urls = ['http://rest.kegg.jp/get/%s' % '+'.join(chunk) for chunk in chunks(list(list_of_ids), 10)]\n num_urls = len(urls)\n print(f\"Total urls to download: {num_urls}. Progress will be shown below.\")\n results = []\n for url in tqdm(urls):\n results.append(download_synchronous(url))\n time.sleep(wait)\n\n return [raw_record for raw_records in results for raw_record in raw_records.split('///')[:-1]]" ]
[ "0.7878511", "0.78570163", "0.7798649", "0.7391583", "0.73496884", "0.728595", "0.72161186", "0.716233", "0.7137925", "0.7103304", "0.70779246", "0.7064052", "0.7041122", "0.7000575", "0.6984006", "0.6970926", "0.69130874", "0.68817306", "0.6855708", "0.68175423", "0.6751781", "0.6732523", "0.67126364", "0.6708006", "0.66980726", "0.66898924", "0.6678605", "0.6637978", "0.6628403", "0.66281295" ]
0.7884188
0
merge all documents into one bigger file
def merge_docs(self):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_documents(path=os.path.join(os.curdir, \"data/processed\"), name='corpus.txt'):\n outname=os.path.join(path, name)\n if os.path.exists(outname):\n os.remove(outname)\n filenames = [f for f in os.listdir(path) if fnmatch.fnmatch(f, '*.txt')]\n with open(outname, 'w') as outfile:\n print \"Combining documents...\"\n for fname in filenames:\n print fname\n with open(os.path.join(path, fname)) as infile:\n outfile.write(infile.read())", "def concatinate_documents_to_single_doc(): \n stars_list=[\"one_star\",\"two_star\",\"three_star\",\"four_star\",\"five_star\"]\n docs_path=\"C:\\supporting_evidence\\external resources\\IMDB\\movie_articles\"\n for star in stars_list:\n curr_star_docs_sentence=\"\"\n for filename in os.listdir(docs_path+\"\\\\\"+ star):\n with open(docs_path+\"\\\\\"+ star+\"\\\\\"+filename, 'r') as f:\n doc_lines=f.read()\n curr_star_docs_sentence+=doc_lines\n with open(star+\"_single_doc.txt\",'wb') as csvfile:\n f=csv.writer(csvfile)\n f.writerow([curr_star_docs_sentence])", "def merge_files(filename_list, merged_file, encode):\n lines_counter = list()\n for file_name in filename_list:\n lines_counter.append(count_lines(file_name))\n lines_counter.sort(key=lambda item: item[-1])\n with open(merged_file, 'w', encoding=encode) as file:\n for doc in lines_counter:\n file.write(f'{doc[0]}\\n')\n file.write(f'{doc[1]}\\n')\n text = get_text(doc[0])\n file.write(f'{text}\\n\\n')", "def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")", "def merge():\n result = []\n for f in glob.glob(f\"{DATA_DIR}/COP*.json\"):\n with open(f, \"r\") as infile:\n result.append(json.load(infile))\n\n with open(f\"{DATA_DIR}/corpus.json\", \"w\", encoding=\"utf-8\") as outfile:\n json.dump(result, outfile)", "def add_documents(self, documents):\n\t\t\n\t\t# flag for StopIteration exceptions\n\t\tmore_documents = True\n\t\t# loop while there are still documents in the iterator\n\t\twhile more_documents:\n\t\t\t# increment batch number\n\t\t\tbatch = len(self.batch_stats) + 1\n\t\t\t# count sentences\n\t\t\tsentences_count = 0\n\t\t\t# create temporary batch data file in the version directory\n\t\t\tbatch_file = os.path.join(self.file_base.get_version_path(self.version), \"data.jl.gz.temp\")\n\t\t\t# try to read the next batch of files, catch exception and stop if there are no more\n\t\t\ttry:\n\t\t\t\t# get next document before opening the file just to make sure it's there\n\t\t\t\tdocument = documents.next()\n\t\t\t\t# open the data file\n\t\t\t\twith gzip.open(batch_file, \"wb\") as outfile:\n\t\t\t\t\t# loop through DOCUMENT_BATCH_SIZE documents\n\t\t\t\t\tfor i in range(DocumentDatabase.DOCUMENT_BATCH_SIZE):\n\t\t\t\t\t\t# count sentences in document\n\t\t\t\t\t\tfor paragraph in document[\"paragraphs\"]:\n\t\t\t\t\t\t\tsentences_count += len(paragraph[\"sentences\"])\n\t\t\t\t\t\t# write JSON to file one line at a time\n\t\t\t\t\t\toutfile.write(\"%s\\n\" % json.dumps(document))\n\t\t\t\t\t\t# if we are not done with this batch, retrieve the next document\n\t\t\t\t\t\tif i < DocumentDatabase.DOCUMENT_BATCH_SIZE - 1:\n\t\t\t\t\t\t\tdocument = documents.next()\n\t\t\texcept StopIteration:\n\t\t\t\t# the end of the documents stream, set the flag to False\n\t\t\t\tmore_documents = False\n\t\t\t# make sure the batch isn't 
empty\n\t\t\tif sentences_count > 0:\n\t\t\t\t# create the new batch in the file system\n\t\t\t\tself.version_batches.create_latest_version()\n\t\t\t\t# add the stats to the statistics hash\n\t\t\t\tself.batch_stats[batch] = BatchStats(sentences_count)\n\t\t\t\t# write the batch statistics to file\n\t\t\t\twith codecs.open(self._get_batch_stat_file(batch), \"wb\", \"utf-8\") as outfile:\n\t\t\t\t\t# write the JSON representation for the stats\n\t\t\t\t\toutfile.write(json.dumps(self.batch_stats[batch].to_json()))\n\t\t\t\t# move the temp data file to the correct location inside the version folder\n\t\t\t\tos.rename(batch_file, self._get_batch_file(batch))", "def merge(root, option, values, extension=\"txt\"):\n if option.upper() not in OPTIONS:\n raise ValueError(\"Options is not implemented: {}\".format(option))\n elif option.upper() == \"TOPIC\":\n searchTerms = values\n elif option.upper() == \"YEAR\":\n startYear, endYear = values\n searchTerms = range(startYear, endYear+1)\n else:\n raise ValueError(\"Update merge OPTIONS, option is not included: {}\".format(option))\n\n for searchTerm in searchTerms:\n document = mergeSimilarDocuments(root, searchTerm, extension)\n\n filename = '{}/{}.{}'.format(root + '_merged', str(searchTerm), str(extension))\n with codecs.open(filename, 'w+', encoding=\"utf-8\") as f:\n f.write(document)", "def createDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # this is create method, no update allowed\n if \"_rev\" in document: del document[\"_rev\"]\n if \"_deleted\" in document: del document[\"_deleted\"]\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def join_files():\n files = [ent_1.get(), ent_2.get()]\n out_writer = PyPDF2.PdfFileWriter()\n for file in files:\n pdf_file = open(file, 'rb')\n file_reader = PyPDF2.PdfFileReader(pdf_file)\n for page in range(file_reader.numPages):\n pageObj = file_reader.getPage(page)\n out_writer.addPage(pageObj)\n\n output_file_name = result_entry.get()\n output_file = open(output_file_name, 'wb')\n out_writer.write(output_file)\n output_file.close()\n pdf_file.close()\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, output_file_name])\n clear_labels()", "def _merge_pdf(self, documents):\n writer = PdfFileWriter()\n streams = []\n for document in documents:\n pdfreport = file(document, 'rb')\n streams.append(pdfreport)\n reader = PdfFileReader(pdfreport)\n for page in range(0, reader.getNumPages()):\n writer.addPage(reader.getPage(page))\n\n merged_file_fd, merged_file_path = tempfile.mkstemp(\n suffix='.pdf', prefix='report.merged.tmp.')\n with closing(os.fdopen(merged_file_fd, 'w')) as merged_file:\n writer.write(merged_file)\n\n for stream in streams:\n stream.close()\n\n return merged_file_path", "def merge_pdfs(output_name, files):\n output = PdfFileWriter()\n for f in files:\n try:\n i = PdfFileReader(file(f, \"rb\"))\n except IOError as e:\n print(e)\n except PdfReadError as e:\n print(e)\n else:\n for p in i.pages:\n output.addPage(p)\n if output.getNumPages():\n ostream = file(output_name, \"wb\")\n output.write(ostream)\n ostream.close()", "def _merge_pdf(self, documents):\n writer = PdfFileWriter()\n streams = [] # We have to close the streams *after* PdfFilWriter's call to write()\n for document in documents:\n pdfreport = file(document, 'rb')\n streams.append(pdfreport)\n reader = 
PdfFileReader(pdfreport)\n for page in range(0, reader.getNumPages()):\n writer.addPage(reader.getPage(page))\n merged_file_fd, merged_file_path = tempfile.mkstemp(suffix='.pdf', prefix='report.merged.tmp.')\n with closing(os.fdopen(merged_file_fd, 'w')) as merged_file:\n writer.write(merged_file)\n for stream in streams:\n stream.close()\n return merged_file_path", "def mergeFile():\n with open(\"output.txt\",'w') as o:\n o.write(data1)\n o.write(data2)\n o.write(data3)", "def merge_pdfs(self, files):\n merger = PdfFileMerger()\n for pdf in files:\n merger.append(pdf)\n\n tmp = tempfile.mktemp()\n with open(tmp, 'wb') as fout:\n merger.write(fout)\n return tmp", "def merge_articles(docs_folder):\n\n s = \"\"\n \n for doc in os.listdir(docs_folder):\n try:\n with open(docs_folder + doc ,'r') as f:\n\n lines = f.readlines()\n raw_doc = \"\".join(txt for txt in lines)\n left_idx_headline = [ m.end(0) for m in re.finditer(r\"<HEADLINE>\",raw_doc)]\n right_idx_headline = [ m.start(0) for m in re.finditer(r\"</HEADLINE>\",raw_doc)]\n\n left_idx_text = [ m.end(0) for m in re.finditer(r\"<TEXT>\",raw_doc)]\n right_idx_text = [ m.start(0) for m in re.finditer(r\"</TEXT>\",raw_doc)]\n\n raw_headline = raw_doc[left_idx_headline[0]:right_idx_headline[0]]\n raw_text = raw_doc[left_idx_text[0]:right_idx_text[0]]\n\n left_idx_paragraph_headline = [ m.end(0) for m in re.finditer(r\"<P>\",raw_headline)]\n right_idx_paragraph_headline = [ m.start(0) for m in re.finditer(r\"</P>\",raw_headline)]\n\n left_idx_paragraph_text = [ m.end(0) for m in re.finditer(r\"<P>\",raw_text)]\n right_idx_paragraph_text = [ m.start(0) for m in re.finditer(r\"</P>\",raw_text)]\n\n for i in range(len(left_idx_paragraph_headline)):\n s += raw_headline[left_idx_paragraph_headline[i]:right_idx_paragraph_headline[i]-2] + \".\"\n\n for i in range(len(left_idx_paragraph_text)):\n s += raw_text[left_idx_paragraph_text[i]:right_idx_paragraph_text[i]-1]\n except:\n pass\n\n return s", "def batch(infolder, outfile): # type: (str, str) -> None\n\n if not os.path.isdir(infolder):\n return\n\n results = []\n\n for filename in os.listdir(infolder):\n print('Processing ' + filename)\n curresults = []\n if filename.endswith('.txt'):\n with open(os.path.join(infolder, filename), 'r') as curfile:\n curdata = curfile.read() + '\\n'\n curresults = processClauseText(curdata, 'text')\n elif filename.endswith('.pdf'):\n with open(os.path.join(infolder, filename), 'rb') as curfile:\n curdata = base64.b64encode(curfile.read()).decode()\n curresults = processClauseText(curdata, 'pdf')\n elif filename.endswith('.docx'):\n with open(os.path.join(infolder, filename), 'rb') as curfile:\n curdata = base64.b64encode(curfile.read()).decode()\n curresults = processClauseText(curdata, 'word')\n if len(curresults) > 0:\n for result in curresults:\n result['filename'] = filename\n results.extend(curresults)\n\n if outfile is not None:\n with open(outfile, 'w') as outfile:\n json.dump(results, outfile, indent=2)", "def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()", "def _merge_files(files: List[str], output: str, delete: bool = True) -> None:\r\n\r\n if not files:\r\n return\r\n\r\n first = True\r\n\r\n ## Open the single concatenated output file\r\n with 
open(output, 'w') as outfl:\r\n\r\n ## Loop through input files...\r\n for fpath in files:\r\n\r\n ## Read each input file and format line x line\r\n with open(fpath, 'r') as infl:\r\n\r\n if not first:\r\n ## Skip the header\r\n next(infl)\r\n else:\r\n first = False\r\n\r\n outfl.write(infl.read())\r\n\r\n ## Remove the file once we're done\r\n if delete:\r\n Path(fpath).unlink()", "def merge(split_files, output_file):\n if len(split_files) > 1:\n raise NotImplementedError(\"Merging multiple XML files is non-trivial and must be implemented for each XML type\")\n # For one file only, use base class method (move/copy)\n data.Text.merge(split_files, output_file)", "def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def backupDocuments(currentTime,baseDir):\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n pathToFolder = baseDir +'Results/'\n FEATURES_DIR = pathToFolder + '/Features/' + currentTime\n docToFeatureVector = parseFeatures(FEATURES_DIR)\n documents = db.documents.find({})\n for document in documents:\n document['text']= document.pop('current_document')\n document['id']= document.pop('_id')\n document['features'] = docToFeatureVector[document[\"query_id\"]+\"-\"+document[\"username\"]]\n del document['posted_document']\n document['iteration'] = currentTime\n db.archive.save(document)", "def merge_all_claims_norm_dicts_for_docs(): \n# docs_norm_scores_dicts_path = base_path+\"\\\\docs_norm_scores_dicts\"\n docs_norm_scores_dicts_path = linux_base_path+\"/docs_norm_scores_dicts\"\n# all_claims_norms_scores_merged_dict = base_path +\"\\\\all_claims_norms_scores_merged_dict\"\n all_claims_norms_scores_merged_dict = linux_base_path +\"/all_claims_norms_scores_merged_dict\"\n for alpha in range(0,11,1):\n for beta in range(0,10,1):\n docs_scores_all_claims = {}\n for filename in os.listdir(docs_norm_scores_dicts_path):\n (alpha_f,beta_f)=turn_to_float([alpha,beta])\n if \"_alpha_\"+str(alpha_f)+\"_\" in filename and \"_beta_\"+str(beta_f)+\"_\" in filename:\n curr_dict = read_pickle(docs_norm_scores_dicts_path+\"/\"+filename)\n docs_scores_all_claims = dict(docs_scores_all_claims.items() + curr_dict.items()) #merge dicts\n save_pickle(all_claims_norms_scores_merged_dict+\"/docs_norm_scores_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f),docs_scores_all_claims)", "def _merge_files(parse_results: Iterable[ParseResult]) -> Iterable[ParseResult]:\n return map(_merge_records, groupby_file(parse_results))", "def extract_documents():\n client = MongoClient()\n conn = client.data\n coll = conn.germanwings\n\n query = {'text': {'$exists': 1}, 'exc': {'$exists': 0}}\n selection = {'text': 1, 'short_url': 1}\n for i, doc in enumerate(coll.find(query, selection)):\n short_url, text = tuple(doc[x] for x in (\"short_url\", \"text\"))\n print(\"Extracting {0} {1}\".format(i, short_url), file=stderr)\n filename = os.path.join(RAW_DIR, short_url)\n with open(filename, \"w\") as f:\n ascii = text.encode('ascii', 'ignore')\n f.write(ascii)", "def merge_cooccur(args):\n if not args.quiet:\n logger.setLevel(logging.INFO)\n\n merged = dict.fromkeys(['mat', 'tokenizer', 'window_size', 
'uniform_count'])\n with tqdm(total=len(args.cooccurfiles), ncols=80, disable=args.quiet) as prog:\n for file in args.cooccurfiles:\n # load the data\n corpus = load_corpus(file)\n\n if merged['tokenizer'] is None:\n merged['tokenizer'] = corpus._tokenizer.to_str()\n\n if merged['window_size'] is None:\n merged['window_size'] = corpus.window_size\n\n if merged['uniform_count'] is None:\n merged['uniform_count'] = corpus.uniform_count\n\n mat = corpus.mat.astype('float32')\n if args.symmetrization:\n mat = (mat + mat.T.tocoo()).tocoo()\n\n if merged['mat'] is None:\n merged['mat'] = mat\n else:\n merged['mat'] += mat\n\n prog.update()\n merged['mat'] = merged['mat'].tocoo()\n\n # save output\n logger.info('Saving to disk...')\n out_fn = join(args.path, args.out)\n with open(out_fn, 'wb') as fp:\n pkl.dump(\n {\n 'mat': {\n 'row': merged['mat'].row,\n 'col': merged['mat'].col,\n 'counts': merged['mat'].data\n },\n 'tokenizer': merged['tokenizer'],\n 'uniform_count': merged['uniform_count'],\n 'window_size': merged['window_size']\n },\n fp\n )", "def export_documents(self, index, filename, **kwargs):\n documentsGenerator = self.get_documents(index, **kwargs)\n documents = []\n format=kwargs.get('format','json')\n for doc in documentsGenerator:\n doc_with_id={**doc.to_dict(),'_id':doc.meta.id}\n documents.append(doc_with_id)\n self.__export_documents(documents,filename,exportformat=format)", "def merge_pdf():\n filelist = [os.path.join(os.path.abspath(\"api/public\"), f) for f in os.listdir(\n os.path.abspath(\"api/public\")) if ('.pdf' in f)]\n merger = PdfFileMerger()\n for f in filelist:\n with open(f, 'rb') as pdf_file:\n merger.append(PdfFileReader(pdf_file))\n os.remove(f)\n export_path = os.path.join(os.path.abspath(\"api/public\"), \"export.pdf\")\n merger.write(export_path)\n return export_path", "def mergeSimilarDocuments(root, searchTerm, extension=\"txt\"):\n searchTerm = str(searchTerm).lower()\n\n document = []\n\n for root, directories, files in os.walk(root):\n for name in files:\n # Skip non-extension matching files.\n if not name[-len(extension):] == extension:\n continue\n filepath = os.path.join(root, name)\n if searchTerm in filepath.lower():\n with codecs.open(filepath, 'r', encoding=\"utf-8\") as f:\n document.append(f.read())\n\n return ''.join(document)", "def merge_files():\n # abs path of data folder\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\\\\KORD\")\n file_list = os.listdir(work_folder)\n with open(os.path.join(work_folder, \"..\\\\merged_history_KORD.csv\"), \"w\") as outfile:\n for line in open(os.path.join(work_folder, file_list[0])):\n outfile.write(line)\n print \"write the first line\"\n for i in range(1, len(file_list)):\n with open(os.path.join(work_folder, file_list[i])) as infile:\n infile.next()\n for line in infile:\n outfile.write(line)", "def amalgamate_all_txts_into_one(\n path_to_folder=\"L:\\\\word_docs\\\\NLP\\\\stemming\\\\combinedRTFDOCX\\\\\",\n save_path_all_txt=\"L:\\\\word_docs\\\\NLP\\\\\",\n future_option=False):\n\n #initialise\n all_txt_stem = \"\"\n\n\n for txt_file in os.listdir(path_to_folder):\n path_to_doc = os.path.join(path_to_folder, txt_file)\n\n #open the file\n pt_txt = open_txt_file(path_to_doc)\n all_txt_stem = all_txt_stem + \" \" + pt_txt\n\n # save all_txt\n \n save_filtered_txt_file(\"made_up\\\\all_txt_stemmed2.txt\", all_txt_stem, save_path_all_txt)\n \n # in order to count, use tokens \n counts = count_tokens(all_txt_stem)" ]
[ "0.7000478", "0.6545423", "0.6462658", "0.63792735", "0.62568164", "0.6245949", "0.6204331", "0.61666673", "0.6163962", "0.6136766", "0.6123079", "0.6118926", "0.61155397", "0.60974044", "0.6090275", "0.5976273", "0.5972494", "0.5966715", "0.5928108", "0.59268343", "0.5917983", "0.58604056", "0.5843391", "0.5841645", "0.5814241", "0.5811194", "0.5791259", "0.57796866", "0.57709336", "0.5734705" ]
0.760634
0
parseBA(fd) -> dict() Parses Rabit's BA format into a simple dictionary.
def parseBA(fd):
    aut = dict()
    first_line = fd.readline().strip()
    aut["initial"] = [first_line]
    aut["transitions"] = []
    aut["final"] = []

    while True:
        line = fd.readline()
        if not line:
            return aut

        line = line.strip()
        if line == "":
            continue

        match = re.match(r'^(?P<state>[^-,>]+)$', line)
        if match:
            aut["final"].append(match.group("state"))
            continue

        match = re.match(r'^(?P<symb>[^-,>]+),(?P<src>[^-,>]+)->(?P<tgt>[^-,>]+)$', line)
        if match:
            symb = match.group("symb")
            src = match.group("src")
            tgt = match.group("tgt")
            aut["transitions"].append((src, symb, tgt))
            continue

        raise Exception("Invalid format: " + line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FASTAfile_to_dict(FASTAfile):\n FASTADict = {}\n for line in FASTAfile:\n if '>' in line:\n FASTALabel = line\n FASTADict[FASTALabel] = \"\"\n else:\n FASTADict[FASTALabel] += line\n return FASTADict", "def parseHOA(fd):\n aut = dict()\n aut[\"initial\"] = []\n aut[\"transitions\"] = []\n aut[\"final\"] = []\n\n aps = dict() # atomic propositions\n\n # reading header\n while True:\n line = fd.readline()\n if not line: # no body\n raise Exception(\"Missing body!\")\n line = line.strip()\n if line == \"\":\n continue\n if line == \"--BODY--\":\n break\n match = re.match(r'^(?P<key>[^:]+):\\s*(?P<value>.*)$', line)\n if not match:\n raise Exception(\"Invalid header format: {}\".format(line))\n\n # input sanity checks\n if match['key'] == \"acc-name\":\n if (match['value'] != \"Buchi\"):\n raise Exception(\"Not Buchi acceptance: {}\".format(match['value']))\n if match['key'] == \"Acceptance\":\n if (match['value'] != \"1 Inf(0)\"):\n raise Exception(\"Expected acceptance: \\\"1 Inf(0)\\\" Received: \\\"{}\\\"\".format(match['value']))\n\n # start state\n if match['key'] == \"Start\":\n aut[\"initial\"] = [match['value']]\n\n # atomic propositions\n if match['key'] == \"AP\":\n ap_ls = match['value'].split()\n aps_num = int(ap_ls[0])\n ap_ls = ap_ls[1:]\n cnt = 0\n for ap in ap_ls: # mam APs to numbers\n aps[cnt] = ap.strip(\"\\\"\")\n cnt += 1\n if cnt != aps_num:\n raise Exception(\"Invalid number of atomic propositions (does not match the declared number: {}\".format(line))\n\n # reading body\n state = None\n while True:\n line = fd.readline()\n if not line: # end of input\n raise Exception(\"Unexpected end of file\")\n line = line.strip()\n if line == \"\":\n continue\n if line == \"--END--\":\n break\n\n match = re.match(r'^State:\\s*(?P<state>\\d+)\\s*(?P<final>.+)?$', line)\n if not match:\n if state is None: # first state not declared\n raise Exception(\"Invalid beginning of the body: {}\".format(line))\n\n trans_match = re.match(r'\\[(?P<aps>[^\\]].*)\\]\\s*(?P<dst>\\d+)$', line)\n if not trans_match:\n raise Exception(\"Invalid transition: {}\".format(line))\n\n dst = trans_match['dst']\n\n str_aps = trans_match['aps']\n ls_str_aps = str_aps.split(\"&\")\n symb = None\n for one_ap in ls_str_aps:\n one_ap = one_ap.strip()\n ap_match = re.match(r'^(?P<neg>!)?\\s*(?P<ap>\\d+)$', one_ap)\n if not ap_match:\n raise Exception(\"Invalid AP: {}\".format(line))\n if not ap_match['neg']: # positive AP\n if symb is not None: # if other AP was positive\n raise Exception(\"More than one positive AP: {}\".format(line))\n\n symb_num = int(ap_match['ap'])\n symb = aps[symb_num]\n\n aut['transitions'].append((state, symb, dst))\n\n # continue in the transition of the current state\n else: # if new state declared\n state = int(match['state'])\n if match['final']:\n aut['final'].append(str(state))\n\n return aut", "def read_fasta(fasta_file):\n\n seq_dict = dict() # Declare a new dictionary\n\n with open(fasta_file,'r') as f:\n lines = f.readlines()\n defline = \"\"\n for li in lines:\n li = li.rstrip() # remove newlines\n if '>' in li:\n defline = li # if i use 'id' it is blue; why?\n seq_dict[defline] = \"\"\n else:\n li = li.upper() # just to clean up sequence\n seq_dict[defline] += li\n\n return seq_dict", "def decode(cls, buffer: bytes) -> Dict[str, Any]:\n pstruct = Struct()\n pstruct.ParseFromString(buffer)\n dictionary = dict(pstruct)\n cls._patch_dict_restore(dictionary)\n return dictionary", "def read_fasta_to_dicts(fname, CONFIG):\n from Bio import SeqIO\n logger.info('Reading in FASTA 
from {}.'.format(fname))\n\n data_dicts = []\n with open(fname, \"rU\") as f:\n\n for record in SeqIO.parse(f, \"fasta\"):\n data = {}\n if record.description in CONFIG[\"fasta_header_swaps\"]:\n record.description = CONFIG[\"fasta_header_swaps\"][record.description]\n head = record.description.split(CONFIG['fasta_separator_character'])\n\n if len(head) != len(CONFIG[\"fasta_headers\"]):\n logger.warn(\"Skipping {} which had {} fields (expected {})\".format(record.description, len(head), len(CONFIG[\"fasta_headers\"])))\n continue\n for i in range(len(CONFIG[\"fasta_headers\"])):\n try:\n data[CONFIG[\"fasta_headers\"][i]] = head[i]\n data['sequence'] = str(record.seq)\n except KeyError:\n logger.critical(\"Error parsing FASTA header. Header: {}. CONFIG specifies: {}\".format(head, CONFIG[\"fasta_headers\"])); sys.exit(2)\n data_dicts.append(data)\n return data_dicts", "def _readFAI(self, fai):\n\t\t#FAI Format http://www.biostars.org/p/1495/\n #chrName chrLen chrSeek lineBases lineLen\n #Chr1 30427671 6 79 80\n #Line len is bases+\\n\n\t\twith open(fai, 'r') as FAI:\n\t\t\tlines = [line.rstrip('\\n').split() for line in FAI]\n\t\tself.sorted_chroms = sorted([line[0] for line in lines])\n\t\tself.chrom_dict = {line[0]:int(line[1]) for line in lines}", "def _decode_compound(fp):\n values = {}\n tag_type = ord(fp.read(1))\n while tag_type > 0:\n name = _decode_string(fp)\n values[name] = _MAP[tag_type](fp)\n tag_type = ord(fp.read(1))\n return values", "def parse_magic_blast(file, data_dict):\n\n alignment_lengths = {i: 0 for i in range(70,101)}\n query_lengths = {i: 0 for i in range(70,101)}\n read_counts = {i: 0 for i in range(70,101)}\n\n name = file.split('_')[0]\n\n with open(file, 'r') as f:\n for l in f:\n if l.startswith('#'): continue\n X = l.rstrip().split('\\t')\n pident = int(X[2].split('.')[0])\n astrt = min(int(X[8]), int(X[9]))\n astp = max(int(X[8]), int(X[9]))\n aLen = astp - astrt # read alignment length\n qLen = int(X[15]) # full length of read\n\n if pident >= 70:\n alignment_lengths[pident] += aLen\n query_lengths[pident] += qLen\n read_counts[pident] += 1\n\n data_dict['alen'] = alignment_lengths\n data_dict['qlen'] = query_lengths\n data_dict['rcount'] = read_counts\n\n return data_dict", "def convertion_binaire_arbre(self):\r\n binary_code = self.root.conversion_binaire('')\r\n binary_dict = {}\r\n binary_code = binary_code.strip().split(\"\\n\")\r\n for element in binary_code:\r\n binary_dict[element.split(\":\")[0]] = element.split(\":\")[1]\r\n return binary_dict", "def read_abbrevs(): \n abbrevs = {}\n with open('abbrev.txt','r') as fhx:\n for line in fhx:\n line = line.decode(\"utf-8\").upper()\n abbrevs[line.split('=')[0]] = line.split('=')[1].rstrip() + \" \"\n \n return abbrevs", "def get_fasta_dict(input_fasta_path):\n\n\ttry:\n\t\tnew_file = open(input_fasta_path, \"rU\")\n\t\tsequence_record_dict = SeqIO.to_dict(SeqIO.parse(new_file, \"fasta\"))\n\t\tnew_file.close()\n\t\treturn sequence_record_dict\n\texcept IOError as e:\n\t\tprint(str(e))\n\t\tsys.exit(1) # Aborts program. 
(exit(1) indicates that an error occurred)", "def read_FASTA_dictionary(filename, splitstr='|', SplitHeader=False):\n return {info[0]: seq for info, seq in read_FASTA(filename, splitstr=splitstr, SplitHeader=SplitHeader)}", "def frame_to_dict(frame):\n item = None\n atom_format = None\n data = {}\n for lines in frame:\n if lines.startswith('ITEM:'):\n item = lines.strip().split('ITEM:')[1].lower().strip()\n item_split = item.split()\n if item_split[0] == 'box':\n item = 'box'\n data[item] = {'bounds': [str(i) for i in item_split[2:]]}\n elif item_split[0] == 'atoms':\n item = 'atoms'\n data[item] = {i: [] for i in item_split[1:]}\n else:\n data[item] = []\n continue\n if item is not None:\n if item in ('timestep', 'number of atoms'):\n data[item] = int(lines.strip())\n elif item in ('box',):\n read_for_box(data['box'], lines)\n elif item in ('atoms',):\n formatted, atom_format = read_atom_data(atom_format, lines)\n for key, val in zip(data['atoms'].keys(), formatted):\n data['atoms'][key].append(val)\n else:\n data[item].append(lines.strip())\n for key, val in data['atoms'].items():\n data['atoms'][key] = np.array(val)\n if len(data['atoms'][key]) != data['number of atoms']:\n print(f'Inconsistent data length for {key}')\n return data", "def fasta_to_dict(fasta_file):\n deflines = []\n sequences = []\n sequence = \"\"\n with open(fasta_file, \"r\") as file:\n for line in file:\n if line.startswith(\">\"):\n deflines.append(line.rstrip().lstrip('>'))\n if sequence:\n sequences.append(sequence)\n sequence = \"\"\n else:\n sequence += line.rstrip()\n sequences.append(sequence)\n fasta_dict = {}\n for x, defline in enumerate(deflines):\n fasta_dict[defline]=sequences[x]\n return fasta_dict", "def cbf_file_to_basis_dict(path):\n import dxtbx.format.Registry\n reader = dxtbx.format.Registry.get_format_class_for_file(path)\n instance = reader(path)\n return map_detector_to_basis_dict(instance.get_detector())", "def ParserPDB(a):\n\tcontenu=list()\n\tmon_fichier=open(a,\"r\")\n\tfor line in mon_fichier.readlines():\n\t\tcontenu.append(line.strip()) #met le contenu du fichier pdb dans la liste \"contenu\"\n\n\tacidea=dict()\n\t\n\n\n\tfor chain in range(len(contenu)): #On parcourt cette liste contenant tout le fichier pdb\n\t\tif contenu[chain][0:5]==\"MODEL\":\n\t\t\tnewProt = contenu[chain][7:14]\n\t\t\t\n\t\t\tif newProt not in acidea.keys():\n\t\t\t\tacidea[newProt]={}\n\t\t\t\t\n\t\tif contenu[chain][0:4]==\"ATOM\": #Si la ligne commence par \"ATOM\" \n\t\t\tChaine = contenu[chain][21]\n\t\t\t\n\t\t\tif Chaine not in acidea[newProt].keys(): #Si la chaine ( A, B ... 
) existe pas deja \n\t\t\t\tacidea[newProt][Chaine] = {} #creation du dictionnaire qui a pour nom les caractères a la ligne 21 ( Chaine)\n\t\t\t\n\t\t\tPosi = contenu[chain][24:26]\n\t\t\tif Posi not in acidea[newProt][Chaine].keys(): #Si la position pour une chaine n'existe pas deja (ex : -3 dans la chaine A)\n\t\t\t\tacidea[newProt][Chaine][Posi]={} # creation du dictionnaire poisition dans le dictionnaire chaine \n\t\t\t\n\t\t\tresidu = contenu[chain][12:16]\n\t\t\tif residu not in acidea[newProt][Chaine][Posi].keys(): #si le residu n'existe pas deja pour une chaine et une position donnée (ex : un CO de la chaine A a la position -3)\n\t\t\t\tacidea[newProt][Chaine][Posi][residu]= {} #Creation du dictionnaire nom de l'atome, contenu dans le dictionnaire position lui meme contenu dans le dictionnaire chaine\t\n\t\t\t\n\t\t\t#repartition de l'information dans le dictionnaire.\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"x\"] = float(contenu[chain][32:38]) #Mise des information de X dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"y\"] = float(contenu[chain][40:46]) #Mise des information de Y dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"z\"] = float(contenu[chain][48:54]) #Meme chose pour Z\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"Id\"] = contenu[chain][9:11] #Meme chose pour Identifiant\n\n\treturn( acidea)", "def ParserTeil2():\r\n Bags = {}\r\n for Line in Lines:\r\n Words = Line.split()\r\n Farbe = Words[0]+\" \"+Words[1]\r\n Bags[Farbe] = re.findall(r\"(\\d+?) (.+?) bags?\", Line)\r\n return Bags", "def read_pdb(self, pdb):\n pdb_a = {}\n for line in pdb:\n at = re.compile(\"(ATOM|HETATM)\")\n if at.match(line):\n nm = re.sub(r'\\s', '', line[6:12])\n aname = re.sub(r'\\s', '', line[12:17])\n ri_c = re.sub(r'\\s', '', line[20:27])\n x = re.sub(r'\\s', '', line[30:38])\n y = re.sub(r'\\s', '', line[38:46])\n z = re.sub(r'\\s', '', line[46:55])\n if ri_c and aname and x and y and z:\n pdb_a[int(nm)] = [aname, Vector(float(x), float(y), float(z)), ri_c]\n return [pdb_a, nm]", "def decode_db_definition(self, data):\n rd = ByteReader(data)\n\n d = dict()\n while not rd.eof():\n keyname = rd.readname()\n if keyname in d:\n print(\"WARN: duplicate key: %s\" % keyname)\n\n index_or_length = rd.readdword()\n if index_or_length >> 31:\n d[keyname] = rd.readbytes(index_or_length & 0x7FFFFFFF)\n else:\n refdata = self.stru.readrec(index_or_length)\n if refdata[:1] != b\"\\x04\":\n print(\"WARN: expected refdata to start with 0x04\")\n d[keyname] = refdata[1:]\n return d", "def MinimalBpseqParser(lines):\n result = {'HEADER':[], 'SEQ_STRUCT':[]}\n \n for line in lines:\n if line.startswith('Filename') or line.startswith('Organism') or\\\n line.startswith('Accession') or line.startswith('Citation') or\\\n \":\" in line:\n result['HEADER'].append(line.strip())\n elif len(line.split()) == 3:\n result['SEQ_STRUCT'].append(line.strip())\n else:\n continue #unknown\n return result", "def _decode_dict(data: BencodedString) -> dict:\n result_dict = {}\n data.del_prefix(1)\n\n while True:\n if data.bytes:\n if data.bytes[0] != END_MARKER:\n key = _decode(data)\n value = _decode(data)\n result_dict[key] = value\n else:\n data.del_prefix(1)\n break\n else:\n raise ValueError(\n \"Cannot decode a dictionary, reached end of the bencoded \"\n \"string before the end marker was found. 
Most likely the \"\n \"bencoded string is incomplete or incorrect.\"\n )\n\n return result_dict", "def parse_bibtex(self, data: str) -> Dict:\n\n new_bib = [line for line in data.splitlines() if \"= ,\" not in line]\n new_bib = \"\\n\".join(new_bib)\n bib_db: bibtexparser.bibdatabase.BibDatabase = bibtexparser.loads(new_bib)\n result = dict()\n for entry in bib_db.entries:\n osti_id = entry[\"ID\"].split(\"_\")[1]\n result[osti_id] = entry\n return result", "def medline_parser(filename):\n pmid_abstract_dict = {}\n with open(filename) as handle:\n for record in Medline.parse(handle):\n if 'AB' in record.keys():\n pmid, abstract = record['PMID'], record['AB']\n pmid_abstract_dict[pmid] = abstract\n return pmid_abstract_dict", "def preprocessBed(fname):\n res = {}\n iter = parseBed(fname)\n for i in iter:\n res.setdefault(i.chr,[])\n res[i.chr].append(i)\n for k in res.keys():\n res[k].sort()\n return res", "def return_fasta_dic(file):\n seq_dict = {rec.id: rec.seq for rec in SeqIO.parse(file, \"fasta\")}\n return seq_dict", "def read(file_path: str) -> dict:\n\n if not os.path.isfile(file_path):\n raise FileNotFoundError(\"The file `%s` must exist and be a BLM file\" % file_path)\n\n file_contents = open(file_path, 'r').read()\n headers = parse_headers(file_contents)\n definitions = parse_definitions(headers, file_contents)\n data = parse_data(headers, definitions, file_contents)\n\n return {'headers': headers, 'definitions': definitions, 'data': data}", "def _convert_to_dict(r):\n if not r:\n return r\n else:\n return dict(token=r[0], code=r[2], value=r[1], address='-')", "def Parse_Fasta(filename):\n dic = {}\n name = None\n seq = ''\n with open(filename) as F:\n for line in F:\n if line.startswith('>'):\n if name is not None:\n dic[name] = seq\n seq = ''\n name = line.strip()\n else:\n seq += line\n if not name in dic:\n dic[name] = seq\n return dic", "def parseAddressFA(address, word_size = 4):\n binary_address = bin(address)[2:].zfill(32)\n byte_offset_size = int(math.log2(word_size))\n byte_offset = int(binary_address[-byte_offset_size:],2)\n tag = int(binary_address[:-(byte_offset_size)],2)\n #address_result = int(binary_address[:-byte_offset_size],2)\n return {\"tag\" : tag, \"address_result\" : address - byte_offset }", "def fasta_parser(filename):\n fasta = {}\n with open(filename) as f:\n contents = f.read()[1:].split('\\n>')\n for section in contents:\n sample = section.split('\\n')\n sample_id = sample[0]\n seq = ''.join(sample[1:]).strip()\n fasta[sample_id] = seq\n return fasta" ]
[ "0.5891278", "0.5844353", "0.58382297", "0.57476145", "0.56055814", "0.558734", "0.5553851", "0.5519351", "0.5498047", "0.548586", "0.5482621", "0.54821944", "0.5435535", "0.5382995", "0.53779835", "0.53777", "0.5375928", "0.5373505", "0.53611994", "0.5351868", "0.5351013", "0.53182125", "0.530086", "0.5275318", "0.5269131", "0.5261188", "0.5251352", "0.52460927", "0.52423006", "0.5240683" ]
0.69841844
0
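A minimal usage sketch for parseBA (hypothetical: it assumes the parseBA function from the record above is in scope; the BA text and state names are invented for illustration):

import io

# Made-up BA-format input: the first line is the initial state, "symb,src->tgt"
# lines are transitions, and a bare state name marks a final state.
sample_ba = "0\na,0->1\nb,1->0\n1\n"

aut = parseBA(io.StringIO(sample_ba))
# With the parser above this yields:
# {"initial": ["0"],
#  "transitions": [("0", "a", "1"), ("1", "b", "0")],
#  "final": ["1"]}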
parseHOA(fd) -> dict() Parses Hanoi Omega Automata format into a simple dictionary. (Supports only a subset focused on state-based acceptance Buchi automata.)
def parseHOA(fd):
    aut = dict()
    aut["initial"] = []
    aut["transitions"] = []
    aut["final"] = []

    aps = dict() # atomic propositions

    # reading header
    while True:
        line = fd.readline()
        if not line: # no body
            raise Exception("Missing body!")
        line = line.strip()
        if line == "":
            continue
        if line == "--BODY--":
            break
        match = re.match(r'^(?P<key>[^:]+):\s*(?P<value>.*)$', line)
        if not match:
            raise Exception("Invalid header format: {}".format(line))

        # input sanity checks
        if match['key'] == "acc-name":
            if (match['value'] != "Buchi"):
                raise Exception("Not Buchi acceptance: {}".format(match['value']))
        if match['key'] == "Acceptance":
            if (match['value'] != "1 Inf(0)"):
                raise Exception("Expected acceptance: \"1 Inf(0)\" Received: \"{}\"".format(match['value']))

        # start state
        if match['key'] == "Start":
            aut["initial"] = [match['value']]

        # atomic propositions
        if match['key'] == "AP":
            ap_ls = match['value'].split()
            aps_num = int(ap_ls[0])
            ap_ls = ap_ls[1:]
            cnt = 0
            for ap in ap_ls: # map APs to numbers
                aps[cnt] = ap.strip("\"")
                cnt += 1
            if cnt != aps_num:
                raise Exception("Invalid number of atomic propositions (does not match the declared number: {}".format(line))

    # reading body
    state = None
    while True:
        line = fd.readline()
        if not line: # end of input
            raise Exception("Unexpected end of file")
        line = line.strip()
        if line == "":
            continue
        if line == "--END--":
            break

        match = re.match(r'^State:\s*(?P<state>\d+)\s*(?P<final>.+)?$', line)
        if not match:
            if state is None: # first state not declared
                raise Exception("Invalid beginning of the body: {}".format(line))

            trans_match = re.match(r'\[(?P<aps>[^\]].*)\]\s*(?P<dst>\d+)$', line)
            if not trans_match:
                raise Exception("Invalid transition: {}".format(line))

            dst = trans_match['dst']

            str_aps = trans_match['aps']
            ls_str_aps = str_aps.split("&")
            symb = None
            for one_ap in ls_str_aps:
                one_ap = one_ap.strip()
                ap_match = re.match(r'^(?P<neg>!)?\s*(?P<ap>\d+)$', one_ap)
                if not ap_match:
                    raise Exception("Invalid AP: {}".format(line))
                if not ap_match['neg']: # positive AP
                    if symb is not None: # if other AP was positive
                        raise Exception("More than one positive AP: {}".format(line))

                    symb_num = int(ap_match['ap'])
                    symb = aps[symb_num]

            aut['transitions'].append((state, symb, dst))

            # continue in the transition of the current state
        else: # if new state declared
            state = int(match['state'])
            if match['final']:
                aut['final'].append(str(state))

    return aut
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseBA(fd):\n aut = dict()\n first_line = fd.readline().strip()\n aut[\"initial\"] = [first_line]\n aut[\"transitions\"] = []\n aut[\"final\"] = []\n\n while True:\n line = fd.readline()\n if not line:\n return aut\n\n line = line.strip()\n if line == \"\":\n continue\n\n match = re.match(r'^(?P<state>[^-,>]+)$', line)\n if match:\n aut[\"final\"].append(match.group(\"state\"))\n continue\n\n match = re.match(r'^(?P<symb>[^-,>]+),(?P<src>[^-,>]+)->(?P<tgt>[^-,>]+)$',\n line)\n if match:\n symb = match.group(\"symb\")\n src = match.group(\"src\")\n tgt = match.group(\"tgt\")\n aut[\"transitions\"].append((src, symb, tgt))\n continue\n\n raise Exception(\"Invalid format: \" + line)", "def aut2HOA(aut):\n state_cnt = 0\n state_transl_dict = dict()\n\n ###########################################\n def state_transl(state):\n \"\"\"state_transl(state) -> int\n\n Translates state names into numbers.\n \"\"\"\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])\n ###########################################\n\n symb_cnt = 0\n symb_transl_dict = dict()\n\n ###########################################\n def symb_transl(symb):\n \"\"\"symb_transl(symb) -> int\n\n Translates symbol names into numbers.\n \"\"\"\n nonlocal symb_cnt\n nonlocal symb_transl_dict\n\n if symb not in symb_transl_dict.keys():\n symb_transl_dict[symb] = symb_cnt\n symb_cnt += 1\n\n return str(symb_transl_dict[symb])\n ###########################################\n\n # count states and transitions\n for st in aut[\"initial\"]:\n state_transl(st)\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n state_transl(src)\n symb_transl(symb)\n state_transl(tgt)\n for st in aut[\"final\"]:\n state_transl(st)\n\n res = \"\"\n res += \"HOA: v1\\n\"\n res += \"States: {}\\n\".format(state_cnt)\n\n res += \"Start: \"\n for state in aut[\"initial\"]:\n res += state_transl(state) + \" \"\n res += \"\\n\"\n\n # magic setting for Buchi condition\n res += \"acc-name: Buchi\\n\"\n res += \"Acceptance: 1 Inf(0)\\n\"\n\n # atomic propositions\n res += \"AP: {}\".format(symb_cnt)\n for i in range(symb_cnt):\n for key in symb_transl_dict:\n if symb_transl_dict[key] == i:\n res += \" \\\"{}\\\"\".format(key)\n res += \"\\n\"\n\n res += \"--BODY--\\n\"\n for (name, num) in state_transl_dict.items():\n res += \"State: {}\".format(num)\n if name in aut[\"final\"]:\n res += \" { 0 }\"\n res += \"\\n\"\n\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n if src == name:\n res += \" [\"\n for i in range(symb_cnt):\n if i != 0:\n res += \" & \"\n if symb_transl_dict[symb] != i:\n res += \"!\"\n res += str(i)\n\n res += \"] {}\\n\".format(state_transl(tgt))\n res += \"--END--\\n\"\n\n return res", "def ogip_dictionary_arf():\n \"\"\"\n this function returns the required and optional keywords and columns\n as defined by OGIP 92-002 and 92-002a\n \"\"\"\n global status\n global REPORT\n\n \"\"\"\n FOR the ARF file:\n \"\"\"\n \"\"\"\n Define REQUIRED Keywords for SPECRESP EXTENSION (note: EXTNAME is SPECRESP)\n \"\"\"\n reqkeys = ['TELESCOP', 'INSTRUME']\n reqkeys.append('FILTER')\n reqkeys.append('CHANTYPE[PHA|PI]')\n reqkeys.append('DETCHANS')\n reqkeys.append('HDUCLASS[OGIP]')\n reqkeys.append('HDUCLAS1[RESPONSE]')\n reqkeys.append('HDUCLAS2[SPECRESP]')\n reqkeys.append('HDUVERS[1.1.0]')\n reqkeys.append('TLMIN*')\n reqkeys.append('NUMGRP')\n reqkeys.append('NUMELT')\n 
reqkeys.append('CCLS0001[CPF]')\n reqkeys.append('CCNM0001[SPECRESP]')\n reqkeys.append('CDTP0001[DATA]')\n reqkeys.append('CVSD0001')\n reqkeys.append('CVST0001')\n reqkeys.append('CDES0001')\n\n \"\"\"\n Define recommended Keywords\n \"\"\"\n optkeys = ['PHAFILE']\n optkeys.append('LO_THRES') # minimum probability threshold in matrix (values < this are set to 0)\n optkeys.append('HDUCLAS3[REDIST|DETECTOR|FULL]') # required if channel numbering doesn't start at 1\n optkeys.append('RMFVERSN[1992A]')\n optkeys.append('HDUVERS1[1.1.0]')\n optkeys.append('HDUVERS2[1.2.0]')\n\n \"\"\"\n Define Required Columns\n \"\"\"\n reqcols = ['ENERG_LO'] # lower energy bound of bin (keV)\n reqcols.append('ENERG_HI') # upper energy bound of bin (keV); generally ENERG_LO(J) = ENERG_HI(J-1)\n reqcols.append('SPECRESP') # the \"effective area\"\n\n\n \"\"\"\n Define Optional Columns\n \"\"\"\n optcols = [] # dispersion order for grating data\n\n specresp = {'KEYWORDS':{'REQUIRED':reqkeys,'RECOMMENDED':optkeys}, 'COLUMNS':{'REQUIRED':reqcols,'RECOMMENDED':optcols}}\n\n extns={'REQUIRED':['SPECRESP'],'OPTIONAL':[]}\n #\n # create structure for the ARF file\n #\n ogip = {'EXTENSIONS':extns,\n 'SPECRESP':specresp,\n 'REFERENCE':'OGIP/92-002',\n 'REFURL':'https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/ofwg_recomm.html',\n 'REFTITLE':'The Calibration Requirements for Spectral Analysis'}\n\n return ogip", "def parse_etymology() -> Dict[str, str]:\n with open(PATH_ETYMOLOGY, encoding=\"utf-8\") as f:\n\n buffer = defaultdict(list)\n for line in f:\n line = line.strip()\n\n # Skip empty lines\n if not line:\n continue\n\n # New block\n if line[1] == \" \" and line[2] == \"(\":\n hanzi = line[0]\n else:\n buffer[hanzi].append(line)\n\n result = {}\n for k,v in buffer.items():\n result[k] = \" \".join(v)\n\n return result", "def parse_aurinkopenkki(hex_str, port=None):\n b = bytes.fromhex(hex_str)\n val = struct.unpack('<BbxxfffIIIII', b)\n\n #struct t_AcudcDATA { \n # uint8_t msg_type;\n # uint8_t msg_ver;\n # float volt;\n # float amp;\n # float watt;\n # uint32_t runTime;\n # uint32_t inEnergy;\n # uint32_t outEnergy;\n # uint32_t inAh;\n # uint32_t outAh;\n \n data = {\n 'voltage': val[2],\n 'current': val[3],\n 'power': val[4],\n 'runtime': val[5],\n 'inEnergy': val[6],\n 'outEnergy': val[7],\n 'inmAh': val[8],\n 'outmAh': val[9],\n }\n return data", "def _readFAI(self, fai):\n\t\t#FAI Format http://www.biostars.org/p/1495/\n #chrName chrLen chrSeek lineBases lineLen\n #Chr1 30427671 6 79 80\n #Line len is bases+\\n\n\t\twith open(fai, 'r') as FAI:\n\t\t\tlines = [line.rstrip('\\n').split() for line in FAI]\n\t\tself.sorted_chroms = sorted([line[0] for line in lines])\n\t\tself.chrom_dict = {line[0]:int(line[1]) for line in lines}", "def process_FAO(x, num):\n d = soildict\n maxlat,minlat, maxlon, minlon = maxmin(x['geometry']['coordinates'])\n d['UniqueID'] = num\n d['FAO_USDA'] = \"FAO\"\n d['Soil'] = inverted_soil_dict[x['properties']['DOMSOI']]\n d['Suborder'] = x['properties']['DOMSOI']\n d['Points'] = x['geometry']['coordinates']\n d['maxlat'] = maxlat\n d['maxlon'] = maxlon\n d['minlat'] = minlat\n d['minlon'] = minlon\n return d", "def read_pdb(self, pdb):\n pdb_a = {}\n for line in pdb:\n at = re.compile(\"(ATOM|HETATM)\")\n if at.match(line):\n nm = re.sub(r'\\s', '', line[6:12])\n aname = re.sub(r'\\s', '', line[12:17])\n ri_c = re.sub(r'\\s', '', line[20:27])\n x = re.sub(r'\\s', '', line[30:38])\n y = re.sub(r'\\s', '', line[38:46])\n z = re.sub(r'\\s', '', line[46:55])\n if ri_c and aname and x and 
y and z:\n pdb_a[int(nm)] = [aname, Vector(float(x), float(y), float(z)), ri_c]\n return [pdb_a, nm]", "def parse_fasta(fasta_f, contig_data):\n\n basen = os.path.basename(fasta_f)\n [soil, ecotype, media] = basen.split(\"_\")[:3]\n\n with open(fasta_f, 'rU') as IN:\n for record in SeqIO.parse(IN, \"fasta\"):\n contig_data[record.description] = {'length': len(record.seq), 'soil': soil, 'ecotype': ecotype, 'media': media}", "def setup_parser(self) -> Dict[str, Any]:\n\n\t# % year,doy, RH(m), Month, day, azimuth(deg),freq, satNu, LSP amp,pk2noise,UTC(hr) \n\t# 2021 9 4.888 1 9 225.3 1 2 9.51 3.23 10.08\n\t# 2021 9 5.018 1 9 181.3 1 15 7.79 2.84 15.67\n\t# 2021 9 5.123 1 9 185.4 1 16 6.27 3.01 0.68\n #----+----0----+----1----+----2----+----3----+----4----+----5----+----6----+----7\n return dict(\n skip_header=1,\n names=(\n \"year\",\n \"doy\",\n \"reflection_height\",\n \"month\",\n \"day\",\n \"azimuth\",\n \"frequency\",\n \"satellite\",\n \"amplitude\",\n \"peak2noise\",\n \"hour\",\n ),\n dtype=(\"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\"),\n )", "def _species(self, hdr):\n # Called PolyAtomic in OpenMIMS source\n d = {}\n\n d['numeric flag'], d['numeric value'], d['elements'], \\\n d['charges'], d['charge label'], d['label'] = \\\n unpack(self._bo + '4i c 64s', hdr.read(81))\n\n d['label'] = self._cleanup_string(d['label'])\n d['charge label'] = self._cleanup_string(d['charge label'])\n\n # OpenMIMS says 3 bytes AFTER el.table are unused; this is wrong,\n # 3 bytes BEFORE el.table (b 81-84) are unused. n_elements (here:\n # atomic number) is element number in periodic table rather than\n # number of elements. n_isotopes (here: isotope number) is offset from\n # main atomic Z number. Also: collapse ElementTable (Tabelts) into\n # main dict, too many layers.\n hdr.seek(3, 1)\n atoms = unpack(self._bo + '15i', hdr.read(60))\n d['atomic number'] = tuple(n for n in atoms[::3])\n d['isotope number'] = tuple(n for n in atoms[1::3])\n d['stoich number'] = tuple(n for n in atoms[2::3])\n return d", "def ParserPDB(a):\n\tcontenu=list()\n\tmon_fichier=open(a,\"r\")\n\tfor line in mon_fichier.readlines():\n\t\tcontenu.append(line.strip()) #met le contenu du fichier pdb dans la liste \"contenu\"\n\n\tacidea=dict()\n\t\n\n\n\tfor chain in range(len(contenu)): #On parcourt cette liste contenant tout le fichier pdb\n\t\tif contenu[chain][0:5]==\"MODEL\":\n\t\t\tnewProt = contenu[chain][7:14]\n\t\t\t\n\t\t\tif newProt not in acidea.keys():\n\t\t\t\tacidea[newProt]={}\n\t\t\t\t\n\t\tif contenu[chain][0:4]==\"ATOM\": #Si la ligne commence par \"ATOM\" \n\t\t\tChaine = contenu[chain][21]\n\t\t\t\n\t\t\tif Chaine not in acidea[newProt].keys(): #Si la chaine ( A, B ... 
) existe pas deja \n\t\t\t\tacidea[newProt][Chaine] = {} #creation du dictionnaire qui a pour nom les caractères a la ligne 21 ( Chaine)\n\t\t\t\n\t\t\tPosi = contenu[chain][24:26]\n\t\t\tif Posi not in acidea[newProt][Chaine].keys(): #Si la position pour une chaine n'existe pas deja (ex : -3 dans la chaine A)\n\t\t\t\tacidea[newProt][Chaine][Posi]={} # creation du dictionnaire poisition dans le dictionnaire chaine \n\t\t\t\n\t\t\tresidu = contenu[chain][12:16]\n\t\t\tif residu not in acidea[newProt][Chaine][Posi].keys(): #si le residu n'existe pas deja pour une chaine et une position donnée (ex : un CO de la chaine A a la position -3)\n\t\t\t\tacidea[newProt][Chaine][Posi][residu]= {} #Creation du dictionnaire nom de l'atome, contenu dans le dictionnaire position lui meme contenu dans le dictionnaire chaine\t\n\t\t\t\n\t\t\t#repartition de l'information dans le dictionnaire.\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"x\"] = float(contenu[chain][32:38]) #Mise des information de X dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"y\"] = float(contenu[chain][40:46]) #Mise des information de Y dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"z\"] = float(contenu[chain][48:54]) #Meme chose pour Z\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"Id\"] = contenu[chain][9:11] #Meme chose pour Identifiant\n\n\treturn( acidea)", "def convert_allele_list_to_ags(hla_allele_list):\n\tallele_list_dict = {}\n\tag_list = []\n\tbw4_6_list = []\n\tfor allele in hla_allele_list:\n\t\tallele = allele.rstrip(\"p P g G\")\n\t\tif allele in allele_to_ag_dict:\n\t\t\tag = \"\"\n\t\t\trule = \"\"\n\t\t\tag = allele_to_ag_dict[allele][0]\n\t\t\trule = allele_to_ag_dict[allele][1]\n\t\t\tbw4_6 = allele_to_ag_dict[allele][2]\n\t\t\tag_list.append(ag)\n\t\t\tbw4_6_list.append(bw4_6)\n\t\t\t\n\t\telse:\n\t\t\tag = \"NA\"\n\t\t\tag_list.append(ag)\n\t\n\tallele_list_dict = {\"Allele_list\": hla_allele_list, \"UNOS antigens\": ag_list, \"Bw4/6 epitopes\": bw4_6_list}\n\t\t\t\t\n\treturn allele_list_dict", "def parse_ensembl_line(line, header):\n line = line.rstrip().split(\"\\t\")\n header = [head.lower() for head in header]\n raw_info = dict(zip(header, line))\n\n ensembl_info = {}\n\n for word in raw_info:\n value = raw_info[word]\n if not value:\n continue\n\n if \"chromosome\" in word:\n ensembl_info[\"chrom\"] = value\n if \"gene name\" in word:\n ensembl_info[\"hgnc_symbol\"] = value\n if \"hgnc id\" in word:\n ensembl_info[\"hgnc_id\"] = int(value.split(\":\")[-1])\n if \"hgnc symbol\" in word:\n ensembl_info[\"hgnc_symbol\"] = value\n if \"strand\" in word:\n ensembl_info[\"strand\"] = int(value)\n\n update_gene_info(ensembl_info, word, value)\n update_transcript_info(ensembl_info, word, value)\n update_exon_info(ensembl_info, word, value)\n update_utr_info(ensembl_info, word, value)\n update_refseq_info(ensembl_info, word, value)\n return ensembl_info", "def build_hap_dict(self, obs_tab, leg_tab, hap_tab, number_of_haplotypes):\n\n hap_dict = dict()\n mismatches = 0\n combined = {pos: (ref,alt,hap) for (chr_id,pos,ref,alt),hap in zip(leg_tab, hap_tab)}\n missing = 3*(None,)\n\n b = (1 << number_of_haplotypes) - 1 #### equivalent to int('1'*number_of_haplotypes,2)\n\n for (pos, read_id, base) in obs_tab:\n ref, alt, hap = combined.get(pos, missing)\n if base==alt:\n hap_dict[(pos,base)] = hap\n elif base==ref:\n hap_dict[(pos,base)] = hap ^ b ### ^b flips all bits of the binary number, hap_tab[ind] using bitwise xor operator.\n else:\n mismatches += 1\n\n 
fraction_of_matches = 1-mismatches/len(obs_tab)\n\n return hap_dict, fraction_of_matches", "def parse_taxonomy(infile):\r\n\r\n res = {}\r\n for line in infile:\r\n if not line or line.startswith('#'):\r\n continue\r\n line = line.rstrip(\"\\n\")\r\n fields = line.split('\\t')\r\n otu = fields[0].split(' ')[0]\r\n res[otu] = taxa_split(fields[1])\r\n\r\n return res", "def parse_opening_hours(opening_hours_string):\n elements = opening_hours_string.split(';') # Split by ';'\n elements = map(lambda x: x.strip(), elements) # Strip whitespace left and right\n elements = map(lambda x: x.split(' '), elements) # Split by whitespace\n\n def maybe_translate(day_abbreviation):\n if day_abbreviation in english_day_abbreviations:\n return days[english_day_abbreviations.index(day_abbreviation)]\n\n def get_days_in_range(days_string):\n from_to_pairs = days_string.split(',')\n from_to_pairs = list(map(lambda x : x.split('-'), from_to_pairs))\n\n result = []\n for pair in from_to_pairs:\n if len(pair) == 1:\n result.append(pair[0])\n\n if len(pair) == 2:\n day_from = maybe_translate(pair[0])\n day_to = maybe_translate(pair[1])\n\n result += (days[days.index(day_from) : days.index(day_to) + 1])\n\n #print('[ERROR] in parsing days string {}. Unkown amount of elements'.format(days_string))\n #return []\n\n return result\n\n opening_hours = {}\n\n for dict_day_name in dict_day_names:\n opening_hours[dict_day_name] = {'start': None, 'end': None}\n\n for element in elements:\n if len(element) > 1:\n days_string = element[0]\n hours_string = element[1]\n\n hours_with_breaks = hours_string.split(',')\n\n from_to_pairs = []\n for hours in hours_with_breaks:\n from_to_pair = hours.split('-')\n from_to_pairs.append(from_to_pair)\n\n open_from = from_to_pairs[0][0]\n open_to = from_to_pairs[-1][-1]\n\n if open_from == 'off' or open_to == 'off':\n continue\n\n days_for_element = get_days_in_range(days_string)\n\n for day in days_for_element:\n if day in days:\n opening_hours[dict_day_names[days.index(day)]]['start'] = open_from\n opening_hours[dict_day_names[days.index(day)]]['end'] = open_to\n\n return opening_hours", "def Parser(wa1, wa2):\r\n #Note that in the documentation, they start counting at position 1\r\n output = { \r\n 'First Borough Name': wa1[360 :369].strip(),\r\n 'House Number Display Format': wa1[369: 385].strip(),\r\n 'House Number Sort Format': wa1[385: 396].strip(),\r\n 'B10SC First Borough and Street Code': wa1[396: 407].strip(),\r\n 'Second Street Name Normalized': wa1[407:439].strip(),\r\n 'Community District': wa2[149:152].strip(),\r\n 'Zip Code': wa2[152:157].strip(),\r\n 'Election District': wa2[157:160].strip(),\r\n 'Assembly District': wa2[160:162].strip(),\r\n 'Congressional District': wa2[163:165].strip(),\r\n 'State Senatorial District': wa2[165:167].strip(),\r\n 'City Council District': wa2[169:171].strip(),\r\n 'Police Precinct': wa2[191:194].strip(),\r\n 'Community School District': wa2[203:205].strip(),\r\n 'Atomic Polygon': wa2[205: 208].strip(),\r\n '2010 Census Tract': wa2[223: 229].strip(),\r\n '2010 Census Block': wa2[229:233].strip(),\r\n '2010 Census Block Suffix': wa2[233].strip(),\r\n 'Neighborhood Tabulation Area (NTA)': wa2[245:249].strip(),\r\n 'DSNY Snow Priority Code': wa2[249].strip(),\r\n 'Hurricane Evacuation Zone (HEZ)': wa2[260:262].strip(),\r\n 'Spatial Coordinates of Segment': {'X Coordinate, Low Address End': wa2[313:320].strip(),\r\n 'Y Coordinate, Low Address End': wa2[320:327].strip(),\r\n 'Z Coordinate, Low Address End': wa2[327:334].strip(),\r\n 'X 
Coordinate, High Address End': wa2[334:341].strip(),\r\n 'Y Coordinate, High Address End': wa2[341:348].strip(),\r\n 'Z Coordinate, High Address End': wa2[348:355].strip(),\r\n },\r\n 'Roadway Type': wa2[444:446].strip(),\r\n 'Bike Lane': wa2[486].strip(),\r\n 'NTA Name': wa2[553: 628].strip(),\r\n 'USPS Preferred City Name': wa2[628:653].strip(),\r\n 'Latitude': wa2[653:662].strip(),\r\n 'Longitude': wa2[662: 673].strip(),\r\n 'Borough Block Lot (BBL)': {'Borough code': wa2[1533].strip(),\r\n 'Tax Block': wa2[1534:1539].strip(),\r\n 'Tax Lot': wa2[1539:1543].strip(),\r\n },\r\n 'Building Identification Number (BIN) of Input Address or NAP': wa2[1581:1588].strip(),\r\n 'X-Y Coordinates of Lot Centroid': wa2[1699:1713].strip(),\r\n 'Spatial X': wa2[125:132].strip(),\r\n 'Spatial Y': wa2[132:139].strip(),\r\n 'Message': wa1[579:659].strip(),\r\n }\r\n return output", "def exon_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n exonpos = defline[1:].split(' ')[1]\n seqs[exonpos] = seq\n\n rnaid_to_accession = dict()\n reported_exons = {}\n exons, cdss = [], {}\n start, stop = None, None\n moltypes = ['mRNA', 'tRNA', 'ncRNA', 'transcript', 'primary_transcript',\n 'V_gene_segment', 'D_gene_segment', 'J_gene_segment',\n 'C_gene_segment']\n for entry in gff3:\n for moltype in moltypes:\n if ('\\t%s\\t' % moltype) in entry:\n accession = re.search(r'accession=([^;\\n]+)', entry).group(1)\n tid = re.search(r'ID=([^;\\n]+)', entry).group(1)\n rnaid_to_accession[tid] = accession\n\n if '\\texon\\t' in entry:\n exons.append(entry)\n elif '\\tCDS\\t' in entry:\n fields = entry.split('\\t')\n pos = '%s_%s-%s%s' % (fields[0], fields[3], fields[4], fields[6])\n cdss[pos] = entry\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if len(exons) == 0:\n continue\n xcept = False\n for exonpos in cdss:\n if ';exception=ribosomal slippage' in cdss[exonpos]:\n xcept = True\n if xcept:\n exons, cdss = [], {}\n start, stop = None, None\n continue\n assert start, 'No start codon for exon(s): %s' % exons[0]\n assert stop, 'No stop codon for exon(s): %s' % exons[0]\n for exon in exons:\n fields = exon.split('\\t')\n assert len(\n fields) == 9, 'entry does not have 9 fields: %s' % exon\n mrnaid = re.search(r'Parent=([^;\\n]+)', fields[8]).group(1)\n exonpos = '%s_%s-%s%s' % (fields[0],\n fields[3], fields[4], fields[6])\n if exonpos in reported_exons:\n continue\n exonlength = int(fields[4]) - int(fields[3]) + 1\n exonseq = seqs[exonpos]\n assert len(exonseq) == exonlength, \\\n 'exon \"%s\": length mismatch; gff=%d, fa=%d' % (\n exonpos, exonlength, len(exonseq))\n gccontent = gc_content(exonseq)\n gcskew = gc_skew(exonseq)\n ncontent = n_content(exonseq)\n context = exon_context(exon, start, stop)\n phase = None\n remainder = None\n if context == 'cds':\n cexon = cdss[exonpos]\n phase = int(cexon.split('\\t')[7])\n remainder = (exonlength - phase) % 3\n values = '%s %s %d %.3f %.3f %.3f %s %r %r' % (\n exonpos, rnaid_to_accession[mrnaid], exonlength, gccontent,\n gcskew, ncontent, context, phase, remainder)\n reported_exons[exonpos] = 1\n yield values.split(' ')\n exons, cdss = [], {}\n start, stop = None, None", "def test_to_dict():\n from matdb.utility import _set_config_paths\n\n _set_config_paths(\"AgPd_Enumerated\", '.')\n \n atSi = Atoms(\"Si8\",positions=[[0,0,0],[0.25,0.25,0.25],[0.5,0.5,0],[0.75,0.75,0.25],\n [0.5,0,0.5],[0.75,0.25,0.75],[0,0.5,0.5],[0.25,0.75,0.75]],\n 
cell=[5.43,5.43,5.43])\n\n kwargs = {}\n args = []\n\n calc = Aflow(atSi, '$control$', '$control$', 0, *args, **kwargs)\n out = {\"folder\":'$control$', \"ran_seed\":0, \"contr_dir\":'$control$',\n \"kwargs\":{\"entry\":None}, \"args\":[]}\n assert compare_nested_dicts(calc.to_dict(),out)", "def parse_data(filename):\n ta = {\n \"name\": \"\", # TA full name\n \"total \": 0, # total contract hours\n \"detailed\": [], # materials for the first page\n \"summary\": {} # materials for the second page\n }\n category = None\n total_hours = 0\n\n for line in open(filename):\n line = line.strip()\n if not line or line.startswith(\"====\") or line.startswith(\"TA INFO\"):\n # skip these lines. they are not meaningful\n continue\n\n key, info = line.split(\":\")\n key = key.strip()\n\n if key in DDAH_CATEGORIES:\n category = key\n\n if key.startswith(\"Full Name\"):\n ta[\"name\"] = info.strip()\n elif key.startswith(\"Total contract\"):\n ta[\"total\"] = float(info.strip().strip(\"_\"))\n else:\n # convert info -> hours, filter out zero hours:\n info = info.strip(\"_\")\n if not info:\n continue\n hours = float(info)\n if hours < 0.1:\n continue\n\n # detailed hours:\n ta[\"detailed\"].append((key, category, hours),)\n # summary hours:\n assert category is not None\n if category not in ta[\"summary\"]:\n ta[\"summary\"][category] = 0\n ta[\"summary\"][category] += hours\n # total hours:\n total_hours += hours\n\n # verify that the hours add up\n if total_hours != ta[\"total\"]:\n raise ValueError(\"Total contract hours is {0} but {1} hours are assigned\".format(ta[\"total\"], total_hours))\n # verify that there are at most 12 rows in ta[\"detailed\"]\n if len(ta[\"detailed\"]) > 12:\n raise ValueError(\"DDAH form supports at most 12 rows of detailed activity, but there are {0}\".format(len(ta[\"detailed\"])))\n\n return ta", "def convert_allele_to_ag(allele):\n\tallele_dict = {}\n\tallele = allele.rstrip(\"p P g G\")\n\tif allele in allele_to_ag_dict:\t\n\t\tag = allele_to_ag_dict[allele][0]\n\t\trule = allele_to_ag_dict[allele][1]\n\t\tbw4_6 = allele_to_ag_dict[allele][2]\n\t\n\telse:\n\t\tag = \"NA\"\n\tallele_dict[allele] = [ag, bw4_6]\n\treturn allele_dict", "def test_parse():\n first = parse_formula(\"PO4H2(CH2)12CH3\")\n assert first == {\"P\":1, \"O\":4, \"H\":29, \"C\":13}\n\n second = parse_formula(\"H2O\")\n assert second == {\"H\":2, \"O\":1}", "def parser_headings(xmlthing):\n match = RE_HEADINGS.search(u(xmlthing))\n if match:\n try:\n buf = match.group()\n flds = RE_FLDS.findall(buf)\n vals = RE_VALS.findall(buf)\n return dict(zip(flds, vals))\n except Exception: # pragma: no cover\n LOGGER.debug(\"Bad parsing of 'headings' for 'oclc' service!\")\n return {} # pragma: no cover", "def process_hmmer_output(file_name):\n dict = {}\n with file_open(file_name, \"r\") as f:\n try:\n for l in f:\n if not l.startswith(\"#\"):\n field = l.split()\n if not field[0] in dict:\n dict[field[0]] = [field[2], field[17], field[18]]\n else:\n if int(dict[field[0]][1]) > int(field[17]):\n dict[field[0]][1] = field[17]\n if int(dict[field[0]][2]) < int(field[18]):\n dict[field[0]][2] = field[18]\n except IndexError:\n\t return {}\n return dict", "def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = 
vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()", "def parse_input(parts):\n\n \"\"\"\n Begin in state A.\n Perform a diagnostic checksum after 6 steps.\n \"\"\"\n turing = {}\n metadata_part = parts[0].split('\\n')\n start_state = metadata_part[0][-2]\n checksum_after = 12302209\n\n metadata = (start_state, checksum_after)\n\n for part in parts[1:]:\n lines = part.split('\\n')\n state = lines[0][-2]\n state_num = int(lines[1][-2])\n # print(\"PART N: \", state, state_num)\n # - Write the value X.\n write_val = int(lines[2][-2])\n move = '>' if lines[3][-6:-1] == 'right' else '<'\n next_state = lines[4][-2]\n turing[(state, state_num)] = (write_val, move, next_state)\n\n state_num = int(lines[5][-2])\n # print(\"PART N: \", state, state_num)\n write_val = int(lines[6][-2])\n move = '>' if lines[7][-6:-1] == 'right' else '<'\n next_state = lines[8][-2]\n turing[(state, state_num)] = (write_val, move, next_state)\n\n # print(turing)\n\n return turing, metadata", "def aluminum_hexathiohypodiphosphate():\n\n positions = [[0.000000, 0.000000, 0.000000],\n [0.500000, 0.000000, 0.500000],\n [0.000000, 0.500000, 0.000000],\n [0.000000, 0.000000, 0.500000],\n [0.197847, 0.276435, 0.101916],\n [0.197847, 0.723565, 0.898084],\n [0.802153, 0.276435, 0.898084],\n [0.802153, 0.723565, 0.101916],\n [0.776404, 0.800507, 0.601208],\n [0.776404, 0.199493, 0.398792],\n [0.223596, 0.800507, 0.398792],\n [0.223596, 0.199493, 0.601208]]\n\n species = ['Al','Al','P','P','S','S','S','S','S','S','S','S']\n\n bravais = 'orthorhombic'\n\n space_group = 16\n lattice_parameters = {'a': Set(5.71230345, 'angstrom'),\n 'b': Set(5.71644625, 'angstrom'),\n 'c': Set(11.46678755,'angstrom')}\n data = {'fractional': positions,\n 'species': species,\n 'lattice_parameters': lattice_parameters,\n 'space_group': ('', space_group),\n 'n_atoms': len(species)}\n\n return data", "def setup_parser(self) -> Dict[str, Any]:\n\n\n # % GALAT - SPP Single Point Positioning\n # % -------------------------------------\n # % Processing Option\n # % ------------------\n # % GNSS system(s) : GALILEO\n # % Orbit type : Broadcast - INAV\n # % Solution type : SPP\n # % Frequency : E1\n # % Elevation mask : 5.0 deg\n # % Time interval : 30.0 s\n # % Ionosphere opt : NeQuick-G\n # % Troposhere opt : GMF with GPT\n # % Obs start : 2020/01/04 00:00:00.0 
GPST (week 2086 518400.0s)\n # % Obs end : 2020/01/04 23:59:30.0 GPST (week 2086 604770.0s)\n # % Epoch expected : 2880\n # % Epoch have : 2880\n # %\n # % Input file(s) : KOUG00GUF_R_20200040000_01D_30S_MO.rnx\n # % Input file(s) : CNES0030.20L\n # % Input file(s) : CNES0040.20L\n # % Input file(s) : igs14.atx\n # %\n # % RINEX header info\n # % ------------------\n # % Marker : KOUG 97301M402\n # % Receiver T/V/# : SEPT POLARX5TR 5.3.0 17323022503\n # % Antenna T/ /# : LEIAR25.R3 LEIT 10180007\n # % Position XYZ : 3855263.3407 -5049731.9986 563040.4252\n # % Antenna H/E/N : 0.0000 0.0000 0.0000\n self._parse_header()\n\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+--\n # 2020/01/04 00:00:00 5.098466365 -52.639742999 106.8901 -0.603 -0.821 -0.349 1.018 0.349 \n # 2020/01/04 00:00:30 5.098466094 -52.639742684 107.4962 -0.633 -0.856 0.257 1.065 0.257 \n # 2020/01/04 00:01:00 5.098466030 -52.639740961 107.6125 -0.640 -1.047 0.373 1.228 0.373 \n return dict(\n names=(\n \"yyyymmdd\", \n \"hhmmss\", \n \"latitude\", \n \"longitude\", \n \"height\", \n \"dlatitude\", \n \"dlongitude\", \n \"dheight\",\n \"hpe\",\n \"vpe\",\n \"site_vel_3d\",\n \"pdop\",\n \"num_satellite_available\",\n \"num_satellite_used\",\n ),\n comments=\"%\",\n delimiter=(10, 9, 15, 15, 10, 9, 9, 9, 9, 9, 9, 6, 4, 4),\n dtype=(\"U10\", \"U9\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\"),\n autostrip=True,\n )", "def read_uai_file(filename):\n uai_info = {}\n\n with open(filename, \"r\") as file_:\n lines = file_.readlines()\n lines = [l.replace(\"\\n\", \"\") for l in lines]\n lines = [l.replace(\"\\t\", \" \") for l in lines]\n\n ### Exclude empty lines\n lines = [l for l in lines if l != '']\n\n uai_info[\"network_type\"] = lines[0]\n uai_info[\"n_variables\"] = int(lines[1])\n\n ### Variables\n uai_info[\"variables\"] = {}\n for i in range(uai_info[\"n_variables\"]):\n uai_info[\"variables\"][i + 1] = {}\n\n uai_info[\"cardinalities\"] = [int(c) for c in lines[2].split(\" \")]\n\n ### Cliques\n uai_info[\"n_cliques\"] = int(lines[3])\n uai_info[\"cliques\"] = {}\n for i in range(uai_info[\"n_cliques\"]):\n uai_info[\"cliques\"][i] = {}\n\n uai_info[\"cliques\"][i][\"vars\"] = [int(c) for c in lines[3 + (i + 1)].split(\" \")][1:]\n uai_info[\"cliques\"][i][\"potential\"] = [c for c in lines[3 + uai_info[\"n_cliques\"] + (2 * i + 2)].split(\" \")]\n\n return uai_info" ]
[ "0.63681", "0.6347017", "0.5696547", "0.5572677", "0.5515874", "0.5506609", "0.5462382", "0.54618406", "0.54504764", "0.54472536", "0.53847593", "0.5374126", "0.5333519", "0.5292556", "0.5265104", "0.52295166", "0.51738256", "0.51727885", "0.51565576", "0.51344913", "0.51198894", "0.511759", "0.5110856", "0.50902426", "0.50819385", "0.5054588", "0.5046962", "0.5046347", "0.50290436", "0.50249267" ]
0.81904477
0
aut2BA(aut) > string Serializes an automaton as Rabit's BA file.
def aut2BA(aut): res = "" for st in aut["initial"]: res += st + "\n" for trans in aut["transitions"]: src, symb, tgt = trans res += "{},{}->{}".format(symb, src, tgt) + "\n" for st in aut["final"]: res += st + "\n" return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_string(representation):\r\n auto = Automaton()\r\n lines = [line.strip() for line in representation.split('\\n')]\r\n auto.transitions = [Transition(*line.split()) for line in lines[:-1]]\r\n auto.final_states = lines[-1].split()[1:]\r\n auto.start_state = lines[-1].split()[0]\r\n\r\n return auto", "def aut2HOA(aut):\n state_cnt = 0\n state_transl_dict = dict()\n\n ###########################################\n def state_transl(state):\n \"\"\"state_transl(state) -> int\n\n Translates state names into numbers.\n \"\"\"\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])\n ###########################################\n\n symb_cnt = 0\n symb_transl_dict = dict()\n\n ###########################################\n def symb_transl(symb):\n \"\"\"symb_transl(symb) -> int\n\n Translates symbol names into numbers.\n \"\"\"\n nonlocal symb_cnt\n nonlocal symb_transl_dict\n\n if symb not in symb_transl_dict.keys():\n symb_transl_dict[symb] = symb_cnt\n symb_cnt += 1\n\n return str(symb_transl_dict[symb])\n ###########################################\n\n # count states and transitions\n for st in aut[\"initial\"]:\n state_transl(st)\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n state_transl(src)\n symb_transl(symb)\n state_transl(tgt)\n for st in aut[\"final\"]:\n state_transl(st)\n\n res = \"\"\n res += \"HOA: v1\\n\"\n res += \"States: {}\\n\".format(state_cnt)\n\n res += \"Start: \"\n for state in aut[\"initial\"]:\n res += state_transl(state) + \" \"\n res += \"\\n\"\n\n # magic setting for Buchi condition\n res += \"acc-name: Buchi\\n\"\n res += \"Acceptance: 1 Inf(0)\\n\"\n\n # atomic propositions\n res += \"AP: {}\".format(symb_cnt)\n for i in range(symb_cnt):\n for key in symb_transl_dict:\n if symb_transl_dict[key] == i:\n res += \" \\\"{}\\\"\".format(key)\n res += \"\\n\"\n\n res += \"--BODY--\\n\"\n for (name, num) in state_transl_dict.items():\n res += \"State: {}\".format(num)\n if name in aut[\"final\"]:\n res += \" { 0 }\"\n res += \"\\n\"\n\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n if src == name:\n res += \" [\"\n for i in range(symb_cnt):\n if i != 0:\n res += \" & \"\n if symb_transl_dict[symb] != i:\n res += \"!\"\n res += str(i)\n\n res += \"] {}\\n\".format(state_transl(tgt))\n res += \"--END--\\n\"\n\n return res", "def make_struc(alat,atom,clat):\r\n if atom == 'Cu' or atom == 'Au':\r\n fcccell = bulk(atom, 'fcc', a=alat)\r\n write('fcc.cif', fcccell)\r\n print(fcccell, fcccell.get_atomic_numbers())\r\n structure = Struc(ase2struc(fcccell))\r\n elif atom == 'CuAu':\r\n lattice = alat * numpy.identity(3)\r\n lattice[2][2] = clat\r\n symbols = ['Cu','Au']\r\n sc_pos = [[0,0,0],[0.5,0.5,0.5]]\r\n bctcell = Atoms(symbols=symbols, scaled_positions=sc_pos, cell=lattice)\r\n write('bct.cif', bctcell)\r\n print(bctcell, bctcell.get_atomic_numbers())\r\n structure = Struc(ase2struc(bctcell))\r\n # check how your cell looks like\r\n print(structure.species)\r\n return structure", "def convert_amber_atomtype_to_rosetta_atomtype(self):\n\n tmpfile = open(\"tmp.mol2\", 'w')\n with open(\"ligand_am1_bcc.mol2\",'r') as f:\n atoms = False\n\n for line in f:\n\n print \"ATOM\", line.find(\"@<TRIPOS>ATOM\"),line\n print \"BOND\", line.find(\"@<TRIPOS>BOND\"),line\n\n if ( len(line) > 13 and line.find(\"@<TRIPOS>ATOM\") >-1.0):\n atoms = True\n\n elif ( len(line) > 13 and line.find(\"@<TRIPOS>BOND\") 
>-1.0):\n atoms = False\n\n elif( atoms == True and len(line) > 75 ):\n tmp_characters = line[47]+\".\"+line[48]\n line = line[0:47]+tmp_characters+line[50:]\n\n tmpfile.write(line)\n tmpfile.close()", "def aut2GFF(aut):\n state_cnt = 0\n state_transl_dict = dict()\n\n ###########################################\n def state_transl(state):\n \"\"\"state_transl(state) -> int\n\n Translates state names into numbers.\n \"\"\"\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])\n ###########################################\n\n res = \"\"\n res += \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\" standalone=\\\"no\\\"?>\\n\"\n res += \"<structure label-on=\\\"transition\\\" type=\\\"fa\\\">\\n\"\n\n # get the alphabet\n alphabet = set()\n states = set()\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n alphabet.add(symb)\n states.add(src)\n states.add(tgt)\n for st in aut[\"initial\"]:\n states.add(st)\n for st in aut[\"final\"]:\n states.add(st)\n\n res += \"<alphabet type=\\\"classical\\\">\\n\"\n for symb in alphabet:\n res += \"<symbol>\" + symb + \"</symbol>\\n\"\n res += \"</alphabet>\\n\"\n\n res += \"<stateset>\\n\"\n for st in states:\n res += \"<state sid=\\\"\" + state_transl(st) + \"\\\"></state>\\n\";\n res += \"</stateset>\\n\"\n\n res += \"<acc type=\\\"buchi\\\">\\n\"\n for st in aut[\"final\"]:\n res += \"<stateID>\" + state_transl(st) + \"</stateID>\\n\"\n res += \"</acc>\\n\"\n\n res += \"<initialStateSet>\\n\"\n for st in aut[\"initial\"]:\n res += \"<stateID>\" + state_transl(st) + \"</stateID>\\n\"\n res += \"</initialStateSet>\\n\";\n\n res += \"<transitionset>\\n\"\n tid = 0\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n res += \"<transition tid=\\\"\" + str(tid) + \"\\\">\\n\"\n tid += 1\n res += \"<from>\" + state_transl(src) + \"</from>\\n\" +\\\n \"<to>\" + state_transl(tgt) + \"</to>\\n\" + \\\n \"<read>\" + symb + \"</read>\\n\"\n res += \"</transition>\\n\"\n res += \"</transitionset>\\n\"\n\n res += \"</structure>\\n\"\n\n return res", "def to_string(fasta):\n\n # remove header\n fasta_nh = fasta.readlines()[1:]\n\n # make into single string\n fasta_str = ''.join(fasta_nh)\n\n # remove newline characters\n seq = fasta_str.replace(\"\\n\", \"\")\n\n return seq", "def single_string_to_actg(bin_str: str) -> str:\r\n y = \"\"\r\n i = 1\r\n while (1):\r\n if i >= len(bin_str):\r\n break\r\n if bin_str[i - 1] == '0' and bin_str[i] == '0':\r\n y += \"A\"\r\n if bin_str[i - 1] == '0' and bin_str[i] == '1':\r\n y += \"C\"\r\n if bin_str[i - 1] == '1' and bin_str[i] == '0':\r\n y += \"G\"\r\n if bin_str[i - 1] == '1' and bin_str[i] == '1':\r\n y += \"T\"\r\n i = i + 2\r\n return y", "async def generate_otu_fasta(db, otu_id):\n\n otu = await db.otus.find_one(otu_id, [\"name\", \"isolates\"])\n\n if not otu:\n raise virtool.errors.DatabaseError(\"OTU does not exist\")\n\n fasta = list()\n\n for isolate in otu[\"isolates\"]:\n async for sequence in db.sequences.find({\"otu_id\": otu_id, \"isolate_id\": isolate[\"id\"]}, [\"sequence\"]):\n fasta.append(format_fasta_entry(\n otu[\"name\"],\n virtool.otus.format_isolate_name(isolate),\n sequence[\"_id\"],\n sequence[\"sequence\"]\n ))\n\n fasta = \"\\n\".join(fasta)\n\n return format_fasta_filename(otu[\"name\"]), fasta", "def generate_aa_sequence(chain):\n\n chain.strip()\n chain_list = chain.split(' ')\n # TODO: What if aa is not in the lookup\n seq = 
[IUPAC_AA_codes[aa] for aa in chain_list]\n return ''.join(seq)", "def test_single_file_cup_string(self):\r\n # convert_biom using otu_table w/o leading #\r\n bt_string = (\r\n '{\"rows\": [{\"id\": \"1\", \"metadata\": null}, {\"id\": \"2\",'\r\n '\"metadata\": null}, {\"id\": \"3\", \"metadata\": null}, {\"id\": \"4\", '\r\n '\"metadata\": null}, {\"id\": \"5\", \"metadata\": null}], \"format\": '\r\n '\"Biological Observation Matrix 0.9.1-dev\", \"data\": [[0, 0, 3.0], '\r\n '[0, 1, 4.0], [1, 0, 2.0], [1, 1, 5.0], [2, 0, 1.0], [2, 1, 2.0], '\r\n '[3, 1, 4.0], [4, 0, 1.0]], \"columns\": [{\"id\": \"S1\", \"metadata\": '\r\n 'null}, {\"id\": \"S2\", \"metadata\": null}], \"generated_by\": '\r\n '\"BIOM-Format 0.9.1-dev\", \"matrix_type\": \"sparse\", \"shape\": '\r\n '[5, 2], \"format_url\": \"http://biom-format.org\", \"date\": '\r\n '\"2012-05-04T09:28:28.247809\", \"type\": \"OTU table\", \"id\": null, '\r\n '\"matrix_element_type\": \"float\"}')\r\n\r\n with open(self.tmp_file, 'w') as fh:\r\n fh.write(bt_string)\r\n\r\n single_file_cup(self.tmp_file, 'lladser_pe,lladser_ci',\r\n self.tmp_outfile, r=4)\r\n\r\n # Not much testing here, just make sure we get back a (formatted)\r\n # matrix with the right dimensions\r\n with open(self.tmp_outfile, 'U') as out_f:\r\n observed = out_f.readlines()\r\n self.assertEqual(len(observed), 3)\r\n self.assertEqual(len(observed[1].split('\\t')), 4)", "def output_aa_string(residues):\n # Dictionary of 3 letter to 1 letter AA conversion\n aa_dict = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',\n 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',\n 'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',\n 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}\n\n s = ''\n for res in residues:\n s = s + aa_dict.get(res.type)\n return s", "def mol_to_cbor(mol: masm.Molecule) -> str:\n serializer = masm.JsonSerialization\n cbor_format = serializer.BinaryFormat.CBOR\n serialization = serializer(mol)\n cbor_binary = serialization.to_binary(cbor_format)\n return serializer.base_64_encode(cbor_binary)", "def convertion_binaire_arbre(self):\r\n binary_code = self.root.conversion_binaire('')\r\n binary_dict = {}\r\n binary_code = binary_code.strip().split(\"\\n\")\r\n for element in binary_code:\r\n binary_dict[element.split(\":\")[0]] = element.split(\":\")[1]\r\n return binary_dict", "def utf8_to_binary() :\n dico_binary, comp_seq, file_comp = read_compressed_file()\n \n #for each items of the sequence convert it in binary string on 8 bits\n bin_str = \"\"\n for value in comp_seq:\n code = ord(value)\n bin_str += '{:08b}'.format(code)\n \n #remove the number of zeroes added \n \n added = int(dico_binary[\"add\"])\n #if the padding is equal to 0, don't cut anathing from the sequence\n if added == 0: \n bin_seq = bin_str\n else: \n bin_seq = bin_str[:-added]\n \n return bin_seq, dico_binary, comp_seq, file_comp", "def test_to_rna(self):\n r = self.DNA(\"UCA\")\n self.assertEqual(str(r), \"TCA\")\n self.assertEqual(str(r.to_rna()), \"UCA\")", "def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SequenceClass(even, name=\"even\")\n odd_dna = self.SequenceClass(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the 
linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")", "def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res", "def coding_strand_to_AA(dna):\n #inital conditions\n protein = ''\n i = 0\n\n #for the length of DNA, translate each codon in an ORF to an amino acid\n while i < (len(dna)-2):\n codon = dna[i:i+3] \n amino_acid = aa_table[codon]\n protein= protein + amino_acid\n i += 3\n\n #return the string of amino acids\n return protein", "def translateORFtoAAs(self,sequence,number):\r\n AAStringfromORF = str()\r\n startingM = int()\r\n for i in range(0,len(sequence)-2,3):\r\n if sequence[i:i+3] != \"AUG\":\r\n pass\r\n else:\r\n startingM = i\r\n for i in range(startingM,len(sequence)-2,3):\r\n x = self.tabletoTranslate(sequence[i:i+3])\r\n AAStringfromORF+=x\r\n if x == \"-\":\r\n self.listofSequences.append(AAStringfromORF.rstrip(\"-\").lstrip().rstrip())\r\n AAStringfromORF = str()\r\n break", "def convert_to_binary(seqid_file_in, seqid_file_out):\n subprocess.run(\n \"blastdb_aliastool -seqid_file_in %s -seqid_file_out %s\"\n % (seqid_file_in, seqid_file_out),\n shell=True,\n env={'PATH': BLAST_PATH}\n )", "def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SEQ(even, name=\"even\")\n odd_dna = self.SEQ(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")", "def make_fasta(data):\n result = data\n if not data.startswith(\">\"):\n result = \"\"\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += \">seq{}\\n\".format(cnt)\n result += line\n result += \"\\n\"\n cnt += 1\n return result.strip()", "def atob(b):\n return base64.b64decode(b.encode()).decode()", "def migrate(belstr: str) -> str:\n\n bo.parse(belstr)\n\n return migrate_ast(bo.ast).to_string()", "def s2b (s):\n return s.encode()", "def s2b(s):\n return s.encode('utf-8')", "def test_to_dna(self):\n r = self.RNA(\"TCA\")\n self.assertEqual(str(r), \"UCA\")\n self.assertEqual(str(r.to_dna()), \"TCA\")", "def parseBA(fd):\n aut = dict()\n first_line = fd.readline().strip()\n aut[\"initial\"] = [first_line]\n aut[\"transitions\"] = []\n aut[\"final\"] = []\n\n while True:\n line = fd.readline()\n if not line:\n return aut\n\n line = line.strip()\n if line == \"\":\n continue\n\n match = re.match(r'^(?P<state>[^-,>]+)$', line)\n if match:\n aut[\"final\"].append(match.group(\"state\"))\n continue\n\n match = re.match(r'^(?P<symb>[^-,>]+),(?P<src>[^-,>]+)->(?P<tgt>[^-,>]+)$',\n line)\n if match:\n symb = match.group(\"symb\")\n src = match.group(\"src\")\n tgt = match.group(\"tgt\")\n aut[\"transitions\"].append((src, symb, tgt))\n continue\n\n 
raise Exception(\"Invalid format: \" + line)", "def binary_to_seq():\n bin_seq, dico_binary, comp_seq, file_comp = utf8_to_binary()\n \n #for each binary value associate the corresponding letter (key) \n #according to the dictionnary \n dna_seq = \"\"\n reading_binary = \"\"\n for value in bin_seq:\n reading_binary += value\n for letter, code in dico_binary.items():\n if code == reading_binary:\n dna_seq += letter\n reading_binary = \"\"\n break\n \n #print(dna_seq, bin_seq, comp_seq, file_comp)\n return dna_seq, bin_seq, comp_seq, file_comp", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)" ]
[ "0.5600367", "0.5583563", "0.52981955", "0.5286957", "0.52553785", "0.5212133", "0.51995844", "0.5184662", "0.5136247", "0.50710785", "0.5066019", "0.50640976", "0.50595695", "0.5055324", "0.50280625", "0.5026861", "0.5022681", "0.50217015", "0.5005895", "0.49576405", "0.49455854", "0.49265665", "0.4911472", "0.48896503", "0.48835608", "0.4881782", "0.48785216", "0.48736295", "0.48613676", "0.48487192" ]
0.74356085
0
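A minimal usage sketch for the aut2BA serializer shown in the row above; the automaton dict and its state/symbol names ("q0", "q1", "a", "b") are invented here purely for illustration, while the function itself is the one quoted in the document field:

    aut = {"initial": ["q0"],
           "transitions": [("q0", "a", "q1"), ("q1", "b", "q0")],
           "final": ["q1"]}
    print(aut2BA(aut))
    # q0
    # a,q0->q1
    # b,q1->q0
    # q1

Each initial state is emitted on its own line first, every transition as symbol,src->tgt, and every final state last — the same Rabit BA layout that the parseBA function appearing among this row's negatives reads back into the dict form.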
aut2HOA(aut) > string Serializes an automaton as the Hanoi Omega Automata file format.
def aut2HOA(aut): state_cnt = 0 state_transl_dict = dict() ########################################### def state_transl(state): """state_transl(state) -> int Translates state names into numbers. """ nonlocal state_cnt nonlocal state_transl_dict if state not in state_transl_dict.keys(): state_transl_dict[state] = state_cnt state_cnt += 1 return str(state_transl_dict[state]) ########################################### symb_cnt = 0 symb_transl_dict = dict() ########################################### def symb_transl(symb): """symb_transl(symb) -> int Translates symbol names into numbers. """ nonlocal symb_cnt nonlocal symb_transl_dict if symb not in symb_transl_dict.keys(): symb_transl_dict[symb] = symb_cnt symb_cnt += 1 return str(symb_transl_dict[symb]) ########################################### # count states and transitions for st in aut["initial"]: state_transl(st) for trans in aut["transitions"]: src, symb, tgt = trans state_transl(src) symb_transl(symb) state_transl(tgt) for st in aut["final"]: state_transl(st) res = "" res += "HOA: v1\n" res += "States: {}\n".format(state_cnt) res += "Start: " for state in aut["initial"]: res += state_transl(state) + " " res += "\n" # magic setting for Buchi condition res += "acc-name: Buchi\n" res += "Acceptance: 1 Inf(0)\n" # atomic propositions res += "AP: {}".format(symb_cnt) for i in range(symb_cnt): for key in symb_transl_dict: if symb_transl_dict[key] == i: res += " \"{}\"".format(key) res += "\n" res += "--BODY--\n" for (name, num) in state_transl_dict.items(): res += "State: {}".format(num) if name in aut["final"]: res += " { 0 }" res += "\n" for trans in aut["transitions"]: src, symb, tgt = trans if src == name: res += " [" for i in range(symb_cnt): if i != 0: res += " & " if symb_transl_dict[symb] != i: res += "!" res += str(i) res += "] {}\n".format(state_transl(tgt)) res += "--END--\n" return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aut2BA(aut):\n res = \"\"\n for st in aut[\"initial\"]:\n res += st + \"\\n\"\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n res += \"{},{}->{}\".format(symb, src, tgt) + \"\\n\"\n for st in aut[\"final\"]:\n res += st + \"\\n\"\n\n return res", "async def generate_otu_fasta(db, otu_id):\n\n otu = await db.otus.find_one(otu_id, [\"name\", \"isolates\"])\n\n if not otu:\n raise virtool.errors.DatabaseError(\"OTU does not exist\")\n\n fasta = list()\n\n for isolate in otu[\"isolates\"]:\n async for sequence in db.sequences.find({\"otu_id\": otu_id, \"isolate_id\": isolate[\"id\"]}, [\"sequence\"]):\n fasta.append(format_fasta_entry(\n otu[\"name\"],\n virtool.otus.format_isolate_name(isolate),\n sequence[\"_id\"],\n sequence[\"sequence\"]\n ))\n\n fasta = \"\\n\".join(fasta)\n\n return format_fasta_filename(otu[\"name\"]), fasta", "def extract_ao_integrals(mol, prefix):\n\n with open(prefix + \"_nuc.txt\", \"w\") as f:\n f.write(\"%.18e\\n\" % mol.energy_nuc())\n\n np.savetxt(prefix + \"_ovl.txt\", mol.intor(\"int1e_ovlp\"))\n\n np.savetxt(prefix + \"_oei.txt\", mol.intor(\"int1e_kin\") + mol.intor(\"int1e_nuc\"))\n\n tei = mol.intor(\"int2e\")\n tei = np.reshape(tei,(2,2,2,2))\n\n\n with open(prefix + \"_tei.txt\", \"w\") as f:\n for i in range(tei.shape[0]):\n for j in range(tei.shape[1]):\n for k in range(tei.shape[2]):\n for l in range(tei.shape[3]):\n f.write(\"%4i %4i %4i %4i %.18e\\n\" % (i, j, k, l, tei[i,j,k,l]))", "def parseHOA(fd):\n aut = dict()\n aut[\"initial\"] = []\n aut[\"transitions\"] = []\n aut[\"final\"] = []\n\n aps = dict() # atomic propositions\n\n # reading header\n while True:\n line = fd.readline()\n if not line: # no body\n raise Exception(\"Missing body!\")\n line = line.strip()\n if line == \"\":\n continue\n if line == \"--BODY--\":\n break\n match = re.match(r'^(?P<key>[^:]+):\\s*(?P<value>.*)$', line)\n if not match:\n raise Exception(\"Invalid header format: {}\".format(line))\n\n # input sanity checks\n if match['key'] == \"acc-name\":\n if (match['value'] != \"Buchi\"):\n raise Exception(\"Not Buchi acceptance: {}\".format(match['value']))\n if match['key'] == \"Acceptance\":\n if (match['value'] != \"1 Inf(0)\"):\n raise Exception(\"Expected acceptance: \\\"1 Inf(0)\\\" Received: \\\"{}\\\"\".format(match['value']))\n\n # start state\n if match['key'] == \"Start\":\n aut[\"initial\"] = [match['value']]\n\n # atomic propositions\n if match['key'] == \"AP\":\n ap_ls = match['value'].split()\n aps_num = int(ap_ls[0])\n ap_ls = ap_ls[1:]\n cnt = 0\n for ap in ap_ls: # mam APs to numbers\n aps[cnt] = ap.strip(\"\\\"\")\n cnt += 1\n if cnt != aps_num:\n raise Exception(\"Invalid number of atomic propositions (does not match the declared number: {}\".format(line))\n\n # reading body\n state = None\n while True:\n line = fd.readline()\n if not line: # end of input\n raise Exception(\"Unexpected end of file\")\n line = line.strip()\n if line == \"\":\n continue\n if line == \"--END--\":\n break\n\n match = re.match(r'^State:\\s*(?P<state>\\d+)\\s*(?P<final>.+)?$', line)\n if not match:\n if state is None: # first state not declared\n raise Exception(\"Invalid beginning of the body: {}\".format(line))\n\n trans_match = re.match(r'\\[(?P<aps>[^\\]].*)\\]\\s*(?P<dst>\\d+)$', line)\n if not trans_match:\n raise Exception(\"Invalid transition: {}\".format(line))\n\n dst = trans_match['dst']\n\n str_aps = trans_match['aps']\n ls_str_aps = str_aps.split(\"&\")\n symb = None\n for one_ap in ls_str_aps:\n one_ap = one_ap.strip()\n ap_match = 
re.match(r'^(?P<neg>!)?\\s*(?P<ap>\\d+)$', one_ap)\n if not ap_match:\n raise Exception(\"Invalid AP: {}\".format(line))\n if not ap_match['neg']: # positive AP\n if symb is not None: # if other AP was positive\n raise Exception(\"More than one positive AP: {}\".format(line))\n\n symb_num = int(ap_match['ap'])\n symb = aps[symb_num]\n\n aut['transitions'].append((state, symb, dst))\n\n # continue in the transition of the current state\n else: # if new state declared\n state = int(match['state'])\n if match['final']:\n aut['final'].append(str(state))\n\n return aut", "def kata2hira(kata):\n hira = [ hiragana_value(x) for x in kata.decode(\"utf-8\") ]\n return \"\".join(hira).encode(\"utf-8\")", "def alom():\n #\n # this is the alpha\n inlist = list(\"begin\") # change data into a list element\n outlist[0:5] = inlist # place data in the list in the correct place\n # print(\"\".join(outlist)) # see result\n #\n # this is the omega\n inlist = list(\"end\")\n #\n # change data into a list element\n outlist[1247:1250] = inlist # place data in the list in the correct place\n outstr = \"\".join(outlist)\n print(outstr)\n print(len(outstr))\n # of = open(\"workfile\", \"w\")\n # of.write(outstr)", "def hp2gona(hp):\n return GONAngle(hp2gon(hp))", "def from_string(representation):\r\n auto = Automaton()\r\n lines = [line.strip() for line in representation.split('\\n')]\r\n auto.transitions = [Transition(*line.split()) for line in lines[:-1]]\r\n auto.final_states = lines[-1].split()[1:]\r\n auto.start_state = lines[-1].split()[0]\r\n\r\n return auto", "def aut2GFF(aut):\n state_cnt = 0\n state_transl_dict = dict()\n\n ###########################################\n def state_transl(state):\n \"\"\"state_transl(state) -> int\n\n Translates state names into numbers.\n \"\"\"\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])\n ###########################################\n\n res = \"\"\n res += \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\" standalone=\\\"no\\\"?>\\n\"\n res += \"<structure label-on=\\\"transition\\\" type=\\\"fa\\\">\\n\"\n\n # get the alphabet\n alphabet = set()\n states = set()\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n alphabet.add(symb)\n states.add(src)\n states.add(tgt)\n for st in aut[\"initial\"]:\n states.add(st)\n for st in aut[\"final\"]:\n states.add(st)\n\n res += \"<alphabet type=\\\"classical\\\">\\n\"\n for symb in alphabet:\n res += \"<symbol>\" + symb + \"</symbol>\\n\"\n res += \"</alphabet>\\n\"\n\n res += \"<stateset>\\n\"\n for st in states:\n res += \"<state sid=\\\"\" + state_transl(st) + \"\\\"></state>\\n\";\n res += \"</stateset>\\n\"\n\n res += \"<acc type=\\\"buchi\\\">\\n\"\n for st in aut[\"final\"]:\n res += \"<stateID>\" + state_transl(st) + \"</stateID>\\n\"\n res += \"</acc>\\n\"\n\n res += \"<initialStateSet>\\n\"\n for st in aut[\"initial\"]:\n res += \"<stateID>\" + state_transl(st) + \"</stateID>\\n\"\n res += \"</initialStateSet>\\n\";\n\n res += \"<transitionset>\\n\"\n tid = 0\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n res += \"<transition tid=\\\"\" + str(tid) + \"\\\">\\n\"\n tid += 1\n res += \"<from>\" + state_transl(src) + \"</from>\\n\" +\\\n \"<to>\" + state_transl(tgt) + \"</to>\\n\" + \\\n \"<read>\" + symb + \"</read>\\n\"\n res += \"</transition>\\n\"\n res += \"</transitionset>\\n\"\n\n res += \"</structure>\\n\"\n\n return res", "def 
hq_isoforms_fa(self):\n return op.join(self.fasta_dir, \"hq_isoforms.fasta\")", "def toGenomeRepresentation(self):\n s = \"\"\n s += str(self.axiom)\n s += \"||\"+str(self.niterations) # The iterations must be shown as well\n for prod in self.productions:\n s += \"||\"\n s += prod.toGenomeRepresentation()\n return s", "def write_input(self, suffix=''):\n \n out_fname = \"input.plasma_1d\"+suffix\n with open(out_fname, 'w+') as outfile:\n outfile.write('# Input file for ASCOT containing radial 1D information of plasma temperature,density and toroidal rotation \\n')\n outfile.write('# range must cover [0,1] of normalised poloidal rho. It can exceed 1. \\n')\n outfile.write('# {:s} (first 3 lines are comment lines) \\n'.format(time.strftime('%d%b%y')))\n outfile.write('{:d}\\t{:1d}\\t# Nrad,Nion \\n'.format(self.nrho,self.nion))\n strcoll = str(1)+' ' # for electrons\n strZ=''\n strA=''\n for i in range(self.nion):\n strZ += str(self.Z[i]) + ' '\n strA += str(self.A[i]) + ' '\n strcoll += str(int(self.coll_mode[i])) + ' '\n strZ +='\\t\\t# ion Znum \\n'\n strA +='\\t\\t# ion Amass \\n'\n strcoll += '# collision mode (0= no colls, 1=Maxw colls, 2=binary colls, 3=both colls) 1st number is for electrons \\n'\n outfile.write(strZ)\t\t\t\t\n outfile.write(strA)\n outfile.write(strcoll)\n \n lab_len=15\n strlabel='RHO (pol)'.ljust(lab_len)+'Te (eV)'.ljust(lab_len)+'Ne (1/m3)'.ljust(lab_len)+'Vtor_I (rad/s)'.ljust(lab_len)+\\\n 'Ti1 (eV)'.ljust(lab_len)\n for i in range(self.nion):\n tmpstr ='Ni{:d} (1/m3)'.format(i+1)\n strlabel+=tmpstr.ljust(lab_len)\n strlabel+='\\n'\n outfile.write(strlabel)\n data=np.array((self.rho, self.te, self.ne, self.vt, self.ti), dtype=float)\n data = np.concatenate([data, [self.ni[i,:] for i in range(self.nion)]])\n\n data=np.transpose(data)\n #print(data)\n #print(\"if i don't print, it won't work\")\n np.savetxt(outfile, data, fmt='%.5e')", "def ot2bio_ote(ote_tag_sequence):\n new_ote_sequence = []\n n_tag = len(ote_tag_sequence)\n prev_ote_tag = '$$$'\n for i in range(n_tag):\n cur_ote_tag = ote_tag_sequence[i]\n assert cur_ote_tag == 'O' or cur_ote_tag == 'T'\n if cur_ote_tag == 'O':\n new_ote_sequence.append(cur_ote_tag)\n else:\n # cur_ote_tag is T\n if prev_ote_tag == 'T':\n new_ote_sequence.append('I')\n else:\n # cur tag is at the beginning of the opinion target\n new_ote_sequence.append('B')\n prev_ote_tag = cur_ote_tag\n return new_ote_sequence", "def translateORFtoAAs(self,sequence,number):\r\n AAStringfromORF = str()\r\n startingM = int()\r\n for i in range(0,len(sequence)-2,3):\r\n if sequence[i:i+3] != \"AUG\":\r\n pass\r\n else:\r\n startingM = i\r\n for i in range(startingM,len(sequence)-2,3):\r\n x = self.tabletoTranslate(sequence[i:i+3])\r\n AAStringfromORF+=x\r\n if x == \"-\":\r\n self.listofSequences.append(AAStringfromORF.rstrip(\"-\").lstrip().rstrip())\r\n AAStringfromORF = str()\r\n break", "def get_ata_obsinfo():\n f = open(os.path.join(ATA_SHARE_DIR, 'obsinfo.toml'), \"r\")\n obsinfo = toml.load(f)\n return obsinfo", "def cfg2json(cfgfilename, outpath=None):\n # open cfg file and load up the output dictionary\n cfg_data = teal.load(cfgfilename, strict=False)\n del cfg_data['_task_name_']\n del cfg_data['_RULES_']\n\n out_dict = {\"parameters\": cfg_data, \"default_values\": cfg_data}\n\n # build output json filename\n json_filename = cfgfilename.split(\"/\")[-1].replace(\".cfg\", \".json\")\n\n if not outpath:\n code_dir = os.path.abspath(__file__)\n base_dir = os.path.dirname(os.path.dirname(code_dir))\n out_dir = os.path.join(base_dir, 
\"pars/hap_pars\")\n det = json_filename.split(\"_\")[0]\n json_filename = json_filename.replace(det, det+\"_astrodrizzle\")\n if det == \"any\":\n json_filename = os.path.join(out_dir, det, json_filename)\n else:\n if det in [\"hrc\", \"sbc\", \"wfc\"]:\n inst = \"acs\"\n if det in [\"ir\", \"uvis\"]:\n inst = \"wfc3\"\n json_filename = \"{}_{}\".format(inst, json_filename)\n json_filename = os.path.join(out_dir, inst, det, json_filename)\n else:\n json_filename = os.path.join(outpath, \"any_\"+json_filename)\n json_filename = json_filename.replace(\"hap.json\", \"hap_basic.json\")\n\n # write out data.\n if os.path.exists(json_filename):\n os.remove(json_filename)\n with open(json_filename, 'w') as fp:\n json.dump(out_dict, fp, indent=4)\n print(\"Wrote {}\".format(json_filename))", "def test_onion_parse():\n vec = get_vector('onion-test-v0.json')\n o = vec['onion']\n o = onion.RoutingOnion.from_hex(o)\n\n assert(o.version == 0)\n assert(bytes.hex(o.hmac) == 'b8640887e027e946df96488b47fbc4a4fadaa8beda4abe446fafea5403fae2ef')\n\n assert(o.to_bin() == bytes.fromhex(vec['onion']))", "def make_open_airspace_format(self):\n # Extract coordinates from KML\n for idxline in range(len(self.kml_lines)):\n if '<name>' in self.kml_lines[idxline]:\n self.name = self.kml_lines[idxline].replace('\\t', '').replace('<name>', '').replace('</name>', '').replace('\\n','')\n if not self.name.startswith('TS'):\n self.name = 'TS_' + self.name\n print('Type: %s | Name: %s' % (self.as_type, self.name))\n if '<coordinates>' in self.kml_lines[idxline]:\n self.coordinates_kml = self.kml_lines[idxline + 1].replace('\\t', '').replace('\\n', '')\n break\n # start conversion to airspace format\n \"\"\" AC A\n AN TS_Erzgeb\n AL FL98\n AH FL99\n DP 50:26:22 N 012:17:59 E\n DP 50:25:25 N 012:18:26 E\n DP 50:24:40 N 012:19:01 E\n DP 50:24:06 N 012:19:46 E\"\"\"\n\n # AC A\n self.txt_lines.append('AC %s\\n' % self.as_type)\n # AN TS_Erzgeb\n self.txt_lines.append('AN %s\\n' % self.name)\n # heights\n self.txt_lines.append('AL FL98\\n')\n self.txt_lines.append('AH FL99\\n')\n # coordinates\n for coo_pt in self.coordinates_kml.split(' ')[:-1]:\n # Target format: DP 50:26:22 N 012:17:59 E\n lat_long = coo_pt.split(',')\n # latitude\n latDecAsStr = lat_long[1].split('.')\n #if '.' not in latDecAsStr: # take care of case \"51\" instead of \"51.123456\"\n # latDecAsStr += '.000000'\n lat_degree = abs(int(latDecAsStr[0]))\n #print(f'latDecAsStr {latDecAsStr}')\n if len(latDecAsStr)==1:\n latDecAsStr.append('0')\n lat_secondDec = (float('0.' + latDecAsStr[1])*60) % 1\n lat_minute = round((float('0.' + latDecAsStr[1])*60) - lat_secondDec)\n lat_second = round(lat_secondDec*60)\n cooString = ('DP %02d:%02d:%02d' %(lat_degree,lat_minute,lat_second))\n if latDecAsStr[0].startswith('-'):\n cooString += ' S'\n else:\n cooString += ' N'\n # longitude\n #print(f'converting lat_long {lat_long}')\n # take care of case: no decimal sign included, case \"11\" instead of \"11.123456\"\n if '.' not in lat_long[0]:\n lat_long[0] += '.0'\n lonDecAsStr = lat_long[0].split('.')\n lon_degree = abs(int(lonDecAsStr[0]))\n lon_secondDec = (float('0.' + lonDecAsStr[1]) * 60) % 1\n lon_minute = round((float('0.' 
+ lonDecAsStr[1]) * 60) - lon_secondDec)\n lon_second = round(lon_secondDec * 60)\n cooString += (' %03d:%02d:%02d' % (lon_degree, lon_minute, lon_second))\n if lonDecAsStr[0].startswith('-'):\n cooString += ' W'\n else:\n cooString += ' E'\n cooString += '\\n'\n self.txt_lines.append(cooString)", "def save_Omg(self, infodir='auto', histogram=True, attribute=True, force_hdf5=False, overwrite=False):\n if infodir == 'auto' or infodir is None:\n infodir = dio.prepdir(self.lattice.lp['meshfn'])\n if self.Omg is not None:\n pinning_name = self.get_pinmeshfn_exten()\n # When running jobs in series (NOT in parallel), can save pinning directly to hdf5\n if force_hdf5:\n h5fn = dio.prepdir(self.lp['meshfn']) + 'omg_configs.hdf5'\n if glob.glob(h5fn):\n rw = \"r+\"\n else:\n rw = \"w\"\n\n with h5py.File(h5fn, rw) as fi:\n keys = fi.keys()\n # is this pinning configuration already in the hdf5 file?\n if pinning_name not in keys:\n # add pinning to the hdf5 file\n print 'saving pinning in hdf5...'\n fi.create_dataset(pinning_name, shape=np.shape(self.Omg), data=self.Omg, dtype='float')\n elif overwrite:\n data = fi[pinning_name] # load the data\n data[...] = self.Omg # assign new values to data\n else:\n raise RuntimeError('Pinning config already exists in hdf5, exiting...')\n else:\n # Otherwise perform standard save of a text file for the pinning configuration\n print 'saving pinning in txt...'\n fn = dio.prepdir(self.lp['meshfn']) + pinning_name + '.txt'\n np.savetxt(fn, self.Omg, header=\"Pinning frequencies Omg\")\n if histogram:\n plt.clf()\n fig, hist_ax = leplt.initialize_histogram(self.Omg, xlabel=r'Pinning frequencies, $\\Omega_g$')\n histfn = 'Omg_hist_mean' + sf.float2pstr(self.lp['Omg']) + self.lp['meshfn_exten']\n plt.savefig(infodir + histfn + '.png')\n plt.clf()\n print 'Saved Omg to ' + fn\n else:\n raise RuntimeError('self.Omg is None, so cannot save it!')", "def f2o(forfile, opath):\n return f2suff(forfile, opath, 'o')", "def save(automaton):\n show_notification(\"Select a location to save \" + automaton[\"name\"])\n location = filedialog.asksaveasfilename(\n title=\"Automaton Export Filename\",\n defaultextension=\"json\"\n )\n # If selected a location, save it\n if len(location) == 0:\n show_error(\"No location given\")\n return False\n else:\n with open(location, 'w') as f: # writing JSON object\n dump(automaton, f, sort_keys=True, indent=4)\n\n # Remove the .json extension\n location = location[:-5]\n visualize(automaton, location, view=False)\n show_notification(\"Saved to \" + location)\n return True", "def voc2json():\n hyou_lesson = hyou_reader()\n mina1_lesson = mina1_reader()\n mina2_lesson = mina2_reader()\n\n lesson_list = hyou_lesson + mina1_lesson + mina2_lesson\n\n json_file = open(OUT_PATH, 'w')\n json_file.write(json.dumps(lesson_list, encoding='utf-8', ensure_ascii=False,\n indent=4, sort_keys=True))\n json_file.close()", "def HDF5_to_ascii(self, **kwds):\n # compile regular expression operator for extracting info from ICESat2 files\n rx = re.compile(r'(processed_)?(ATL\\d+)(-\\d{2})?_(\\d{4})(\\d{2})(\\d{2})'\n r'(\\d{2})(\\d{2})(\\d{2})_(\\d{4})(\\d{2})(\\d{2})_(\\d{3})_(\\d{2})(.*?).h5$')\n # split extension from HDF5 file\n # extract parameters from ICESat2 HDF5 file\n if isinstance(self.filename, str):\n fileBasename,fileExtension=os.path.splitext(self.filename)\n # extract parameters from ICESat2 HDF5 file\n SUB,PRD,HEM,YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX = \\\n rx.findall(os.path.basename(self.filename)).pop()\n else:\n 
fileBasename,fileExtension=os.path.splitext(self.filename.filename)\n SUB,PRD,HEM,YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX = \\\n rx.findall(os.path.basename(self.filename.filename)).pop()\n # output file suffix for csv or tab-delimited text\n delimiter = ',' if self.reformat == 'csv' else '\\t'\n # copy bare minimum variables from the HDF5 file to the ascii file\n source = h5py.File(self.filename,mode='r')\n\n # find valid beam groups by testing for particular variables\n if (PRD == 'ATL06'):\n VARIABLE_PATH = ['land_ice_segments','segment_id']\n elif (PRD == 'ATL07'):\n VARIABLE_PATH = ['sea_ice_segments','height_segment_id']\n elif (PRD == 'ATL08'):\n VARIABLE_PATH = ['land_segments','segment_id_beg']\n elif (PRD == 'ATL10'):\n VARIABLE_PATH = ['freeboard_beam_segments','delta_time']\n elif (PRD == 'ATL12'):\n VARIABLE_PATH = ['ssh_segments','delta_time']\n # create list of valid beams within the HDF5 file\n beams = []\n for gtx in [k for k in source.keys() if bool(re.match(r'gt\\d[lr]',k))]:\n # check if subsetted beam contains data\n try:\n source['/'.join([gtx,*VARIABLE_PATH])]\n except KeyError:\n pass\n else:\n beams.append(gtx)\n\n # for each valid beam within the HDF5 file\n for gtx in sorted(beams):\n # extract variables and attributes for each variable\n values = {}\n vattrs = {}\n # create a column stack of valid output segment values\n if (PRD == 'ATL06'):\n # land ice height\n var = source[gtx]['land_ice_segments']\n valid, = np.nonzero(var['atl06_quality_summary'][:] == 0)\n # variables for the output ascii file\n vnames = ['segment_id','delta_time','latitude','longitude',\n 'h_li','h_li_sigma']\n vformat = ('{1:0.0f}{0}{2:0.9f}{0}{3:0.9f}{0}{4:0.9f}{0}'\n '{5:0.9f}{0}{6:0.9f}')\n # for each output variable\n for i,v in enumerate(vnames):\n # convert data to numpy array for HDF5 compatibility\n values[v] = np.copy(var[v][:])\n # extract attributes\n vattrs[v] = {atn:atv for atn,atv in var[v].attrs.items()}\n # add precision attributes for ascii yaml header\n if (v == 'segment_id'):\n vattrs[v]['precision'] = 'integer'\n vattrs[v]['units'] = 'count'\n else:\n vattrs[v]['precision'] = 'double_precision'\n vattrs[v]['comment'] = f'column {i+1:d}'\n elif (PRD == 'ATL07'):\n # sea ice height\n var = source[gtx]['sea_ice_segments']\n valid, = np.nonzero(var['heights/height_segment_quality'][:] == 1)\n # variables for the output ascii file\n vnames = ['height_segment_id','delta_time',\n 'latitude','longitude','seg_dist_x',\n 'heights/height_segment_height',\n 'heights/height_segment_confidence',\n 'heights/height_segment_type',\n 'heights/height_segment_ssh_flag',\n 'heights/height_segment_w_gaussian',\n 'stats/photon_rate','stats/cloud_flag_asr',\n 'geophysical/height_segment_lpe',\n 'geophysical/height_segment_mss',\n 'geophysical/height_segment_ocean',\n 'geophysical/height_segment_ib']\n vformat = ('{1:0.0f}{0}{2:0.9f}{0}{3:0.9f}{0}{4:0.9f}{0}'\n '{5:0.9f}{0}{6:0.9f}{0}{7:0.9f}{0}{8:0.0f}{0}{9:0.0f}{0}'\n '{10:0.9f}{0}{11:0.9f}{0}{12:0.0f}{0}{13:0.9f}{0}'\n '{14:0.9f}{0}{15:0.9f}{0}{16:0.9f}')\n # for each output variable\n for i,v in enumerate(vnames):\n # convert data to numpy array for HDF5 compatibility\n values[v] = np.copy(var[v][:])\n # extract attributes\n vattrs[v] = {atn:atv for atn,atv in var[v].attrs.items()}\n # add precision attributes for ascii yaml header\n if v in ('height_segment_id','heights/height_segment_type',\n 'heights/height_segment_ssh_flag',\n 'stats/cloud_flag_asr'):\n vattrs[v]['precision'] = 'integer'\n else:\n vattrs[v]['precision'] = 
'double_precision'\n vattrs[v]['comment'] = f'column {i+1:d}'\n elif (PRD == 'ATL08'):\n # land and vegetation height\n var = source[gtx]['land_segments']\n valid, = np.nonzero(var['terrain/h_te_best_fit'][:] !=\n var['terrain/h_te_best_fit'].fillvalue)\n # variables for the output ascii file\n vnames = ['segment_id_beg','segment_id_end','delta_time',\n 'latitude','longitude','terrain/h_te_best_fit',\n 'terrain/h_te_uncertainty','terrain/terrain_slope',\n 'canopy/h_canopy','canopy/h_canopy_uncertainty']\n vformat = ('{1:0.0f}{0}{2:0.0f}{0}{3:0.9f}{0}{4:0.9f}{0}'\n '{5:0.9f}{0}{6:0.9f}{0}{7:0.9f}{0}{8:0.9f}{0}{9:0.9f}{0}'\n '{10:0.9f}')\n # for each output variable\n for i,v in enumerate(vnames):\n # convert data to numpy array for HDF5 compatibility\n values[v] = np.copy(var[v][:])\n # extract attributes\n vattrs[v] = {atn:atv for atn,atv in var[v].attrs.items()}\n # add precision attributes for ascii yaml header\n if v in ('segment_id_beg','segment_id_end'):\n vattrs[v]['precision'] = 'integer'\n vattrs[v]['units'] = 'count'\n else:\n vattrs[v]['precision'] = 'double_precision'\n vattrs[v]['comment'] = f'column {i+1:d}'\n\n # column stack of valid output segment values\n output = np.column_stack([values[v][valid] for v in vnames])\n\n # output ascii file\n ascii_file = f'{fileBasename}_{gtx}.{self.reformat}'\n fid = open(os.path.expanduser(ascii_file), mode='w', encoding='utf8')\n # print YAML header to top of file\n fid.write('{0}:\\n'.format('header'))\n # global attributes for file\n fid.write(' {0}:\\n'.format('global_attributes'))\n for atn,atv in source.attrs.items():\n if atn not in ('Conventions','Processing Parameters','hdfversion',\n 'history','identifier_file_uuid'):\n fid.write(' {0:22}: {1}\\n'.format(atn,self.attributes_encoder(atv)))\n # beam attributes\n fid.write('\\n {0}:\\n'.format('beam_attributes'))\n for atn,atv in source[gtx].attrs.items():\n if atn not in ('Description',):\n fid.write(' {0:22}: {1}\\n'.format(atn,self.attributes_encoder(atv)))\n # data dimensions\n fid.write('\\n {0}:\\n'.format('dimensions'))\n nrow,ncol = np.shape(output)\n fid.write(' {0:22}: {1:d}\\n'.format('segments',nrow))\n # non-standard attributes\n fid.write('\\n {0}:\\n'.format('non-standard_attributes'))\n # value to convert to GPS seconds (seconds since 1980-01-06T00:00:00)\n fid.write(' {0:22}:\\n'.format('atlas_sdp_gps_epoch'))\n atlas_sdp_gps_epoch = source['ancillary_data']['atlas_sdp_gps_epoch']\n for atn in ['units','long_name']:\n atv = self.attributes_encoder(atlas_sdp_gps_epoch.attrs[atn])\n fid.write(' {0:20}: {1}\\n'.format(atn,atv))\n fid.write(' {0:20}: {1:0.0f}\\n'.format('value',atlas_sdp_gps_epoch[0]))\n # print variable descriptions to YAML header\n fid.write('\\n {0}:\\n'.format('variables'))\n for v in vnames:\n fid.write(' {0:22}:\\n'.format(posixpath.basename(v)))\n for atn in ['precision','units','long_name','comment']:\n atv = self.attributes_encoder(vattrs[v][atn])\n fid.write(' {0:20}: {1}\\n'.format(atn,atv))\n # end of header\n fid.write('\\n\\n# End of YAML header\\n')\n # print data to file\n for row in output:\n print(vformat.format(delimiter,*row),file=fid)\n # close the file\n fid.close()\n # close the source HDF5 file\n source.close()", "def gon2hpa(gon):\n return HPAngle(gon2hp(gon))", "def _al2co(clustal_in, al2co_out):\n log_out = \"{}.out\".format(al2co_out)\n log_err = \"{}.err\".format(al2co_out)\n al2co_command = \"al2co -i {:} -g 0.9 | head -n -12 > {:}\"\n with open(log_out, 'a') as f_out:\n with open(log_err, 'a') as f_err:\n command = 
al2co_command.format(clustal_in, al2co_out)\n f_out.write('=================== CALL ===================\\n')\n f_out.write(command + '\\n')\n subprocess.check_call(\n command, shell=True, stderr=f_err, stdout=f_out)\n f_out.write('================= END CALL =================\\n')", "def _writeOneFASTA(sequence, filehandle):\n filehandle.write(\">\" + sequence.getName()+\"\\n\")\n data = sequence.getSequence()\n lines = ( sequence.getLen() - 1) / 60 + 1\n for i in range(lines):\n #note: python lets us get the last line (var length) free\n #lineofseq = data[i*60 : (i+1)*60] + \"\\n\"\n lineofseq = \"\".join(data[i*60 : (i+1)*60]) + \"\\n\"\n filehandle.write(lineofseq)", "def geneA(nombreA,listaPGA): #Esta sección fue hecha por Ángel\n with open(nombreA + \".txt\", \"w\") as archivo:\n archivo.writelines(listaPGA)", "def to_tim_file(self, fname=None, toa_format=None):\n if toa_format is None:\n toa_format = self.default_format\n lines = []\n if toa_format.lower() == 'tempo2':\n lines.append(\"FORMAT 1\")\n else: \n if self.mode is not None:\n lines.append(\"MODE %d\" % self.mode)\n if self.track is not None:\n lines.append(\"TRACK %d\" % self.track)\n for ii in range(self.get_nTOAs()):\n if ii in self.phase_wraps:\n if self.phase_wraps[ii] > 0:\n ph_arg = \"+\" + str(self.phase_wraps[ii])\n else:\n ph_arg = str(self.phase_wraps[ii])\n lines.append(\"PHASE %s\" % ph_arg)\n if self.jump_statement_before(ii):\n lines.append(\"JUMP\")\n if toa_format.lower() == 'parkes':\n lines.append(self.TOAs[ii].to_parkes_format())\n elif toa_format.lower() == 'itoa':\n lines.append(self.TOAs[ii].to_ITOA_format())\n elif toa_format.lower() == 'princeton':\n lines.append(self.TOAs[ii].to_princeton_format())\n elif toa_format.lower() == 'tempo2':\n lines.append(self.TOAs[ii].to_Tempo2_format())\n else:\n print \"TOA Format must be 'princeton', 'parkes', or 'ITOA'.\"\n if self.jump_statement_after(ii):\n lines.append(\"JUMP\")\n if fname is None:\n for line in lines:\n print line\n else:\n with open(fname, 'w') as f:\n for line in lines:\n f.write(line + \"\\n\")", "def create_pythia_cmnd_files(self):\n \n for higgsname, higgspid in {'H': 35, 'A': 36}.iteritems():\n \n # Get mass and width from 2HDMC LHA file\n lha = LHA(self.lhafile)\n mass = lha.get_block('MASS').get_entry_by_key(higgspid)\n width = lha.get_decay(higgspid).width \n \n outname = self.lhafile.replace('.lha', '_%s.cmnd' % higgsname)\n self.cmndfiles[higgsname] = outname\n \n # Write command file\n with open(outname, 'w') as outfile:\n \n outfile.write('Beams:eCM = 13000.\\n')\n outfile.write('Higgs:useBSM = on\\n')\n \n if higgspid == 36:\n #outfile.write('HiggsBSM:allA3 = on\\n') # All production modes\n outfile.write('HiggsBSM:ffbar2A3 = on\\n') # quark fusion\n outfile.write('HiggsBSM:gg2A3 = on\\n') # gluon fusion\n elif higgspid == 35:\n #outfile.write('HiggsBSM:allH2 = on\\n') # All production modes\n outfile.write('HiggsBSM:ffbar2H2 = on\\n') # quark fusion\n outfile.write('HiggsBSM:gg2H2 = on\\n') # gluon fusion\n \n outfile.write('{}:all = A0 A0 1 0 0 {} {} 50.0 0.0\\n'.format(higgspid, mass, width))\n outfile.write('{}:onMode = off\\n'.format(higgspid))\n outfile.write('{}:onIfMatch = 15 -15\\n'.format(higgspid))\n \n outfile.write('15:onMode = off\\n')\n outfile.write('15:onIfMatch = 16 111 211\\n')\n outfile.write('\\n')\n outfile.write('Next:numberShowEvent = 0\\n')\n\n return 0", "def parse_obo(obo,\n output_file=None,\n id2name_file=None,\n id2namespace_file=None,\n alt_id_file=None):\n\n ## Keywords that screw up parsing:\n # 
import, is_anonymous, intersection_of, union_of\n\n ## Relations\n # 'is_a:'\n # 'relationship: has_part' # Not in filtered GO\n # 'relationship: occurs_in' # Not in filtered GO\n # 'relationship: part_of' \n # 'relationship: positively_regulates' \n # 'relationship: negatively_regulates'\n # 'relationship: regulates'\n # 'relationship: results_in' # Not in filtered GO\n\n stanza, edges = [], []\n id2name = dict()\n id2namespace = dict()\n alt_id = dict()\n in_term_stanza = False\n default_namespace_exists = False\n for line in io.open(obo).read().splitlines():\n\n line = line.split('!')[0].strip() # Remove comments\n\n if len(line)>0 and line[0]=='[' and line[-1]==']':\n # Add last stanza if it was a term stanza. Include namespace.\n if in_term_stanza:\n edges.extend(x+(namespace, ) for x in stanza)\n\n # Start new term stanza\n stanza = []\n \n # Set the default namespace, if it exists\n if default_namespace_exists:\n namespace = default_namespace\n \n # In a term stanzo or not\n in_term_stanza = line =='[Term]'\n\n name = None\n \n #if 'alt_id:' in line: assert False\n\n if 'id:' == line[:3]:\n curr_term = line.split('id:')[1].strip()\n elif 'alt_id:' in line:\n alt_term = line.split('alt_id:')[1].strip()\n if curr_term in alt_id: alt_id[curr_term].append(alt_term)\n else: alt_id[curr_term] = [alt_term]\n id2name[alt_term] = name\n elif 'name:' in line:\n name = line.split('name:')[1].strip()\n #assert not curr_term in id2name\n id2name[curr_term] = name\n elif 'is_a:' in line:\n parent = line.split('is_a:')[1].strip()\n stanza.append((parent, curr_term, 'is_a'))\n elif 'relationship:' in line:\n line = line.split('relationship:')[1].strip().split()\n if len(line)!=2: print(line)\n assert len(line)==2\n relation, parent = line\n stanza.append((parent, curr_term, relation))\n elif 'namespace:' == line[:10]:\n namespace = line.split('namespace:')[1].strip()\n assert not curr_term in id2namespace\n id2namespace[curr_term] = namespace\n elif 'default-namespace:' == line[:18]:\n namespace = line.split('default-namespace:')[1].strip()\n default_namespace_exists = True\n default_namespace = namespace\n\n pd.DataFrame(edges).to_csv(output_file, header=False, index=False, sep='\\t')\n pd.Series(id2name).to_csv(id2name_file, sep='\\t')\n pd.Series(id2namespace).to_csv(id2namespace_file, sep='\\t')\n pd.Series(dict([(a, c) for a, b in alt_id.items() for c in b])).to_csv(alt_id_file, sep='\\t')" ]
[ "0.583925", "0.56710726", "0.54376674", "0.543635", "0.5432066", "0.5290863", "0.5243628", "0.5122951", "0.510304", "0.50739604", "0.50417066", "0.5039839", "0.50381833", "0.5025238", "0.50088596", "0.49178177", "0.48926035", "0.48878336", "0.48868248", "0.48853645", "0.48714852", "0.4860662", "0.48589385", "0.48394245", "0.4826779", "0.48159042", "0.48085743", "0.48075882", "0.47976798", "0.47941673" ]
0.7347903
0
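A corresponding sketch for the aut2HOA serializer from the last row, applied to the same toy automaton (state and symbol names are again illustrative only). States and atomic propositions are numbered in the order they are first encountered, and each transition label is a conjunction over all APs with exactly one positive literal:

    aut = {"initial": ["q0"],
           "transitions": [("q0", "a", "q1"), ("q1", "b", "q0")],
           "final": ["q1"]}
    print(aut2HOA(aut))
    # HOA: v1
    # States: 2
    # Start: 0
    # acc-name: Buchi
    # Acceptance: 1 Inf(0)
    # AP: 2 "a" "b"
    # --BODY--
    # State: 0
    #  [0 & !1] 1
    # State: 1 { 0 }
    #  [!0 & 1] 0
    # --END--

Accepting states carry the { 0 } acceptance set, which together with the fixed "Acceptance: 1 Inf(0)" header encodes the Buchi condition used by this serializer.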