query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
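Each row below is one retrieval-training example: a natural-language query (typically a docstring), the positive code document it describes, a metadata dict naming the training objective, 30 mined negative code snippets with their similarity scores, and the positive document's own score and rank. A minimal sketch of loading and inspecting such a row with the Hugging Face `datasets` library is shown below; the repository path `your-org/your-dataset` is a placeholder, not the actual dataset name.

```python
# Minimal inspection sketch (assumed usage). "your-org/your-dataset" is a
# placeholder repository path, not the real one.
from datasets import load_dataset

ds = load_dataset("your-org/your-dataset", split="train")
row = ds[0]

print(row["query"])                 # docstring-style natural-language query
print(row["document"][:200])        # positive code snippet (truncated here)
print(len(row["negatives"]))        # 30 mined negative code snippets
print(len(row["negative_scores"]))  # one similarity score per negative
print(row["document_score"], row["document_rank"])
```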
Return path to directory containing the static libraries of cudatoolkit. | def get_nvidia_static_cudalib_ctk():
nvvm_ctk = get_nvidia_nvvm_ctk()
if not nvvm_ctk:
return
env_dir = os.path.dirname(os.path.dirname(nvvm_ctk))
dirs = ('Lib', 'x64') if IS_WIN32 else ('lib',)
return os.path.join(env_dir, *dirs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_swagger_static_root():\n return os.path.join(CURDIR, \"static\")",
"def path_static():\n return os.path.abspath(os.path.dirname(__file__))+'/_static'",
"def get_library_dir():\n return os.path.join(get_script_path(), 'library')",
"def get_data_dir():\n rootdir = os.path.dirname(__file__)\n libdir = rootdir + os.sep + \"data\"\n return libdir",
"def library_dirs(self):",
"def glr_path_static():\n return os.path.join(base_path, \"static\")",
"def lib_dir(self):\n raise NotImplementedError('Implement this property.')",
"def _get_path_to_front_end():\n dpath = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'fe')\n log(\"Front-end static files @ {0}\".format(dpath))\n\n return dpath",
"def get_nvidia_cudalib_ctk():\n nvvm_ctk = get_nvidia_nvvm_ctk()\n if not nvvm_ctk:\n return\n env_dir = os.path.dirname(os.path.dirname(nvvm_ctk))\n subdir = 'bin' if IS_WIN32 else 'lib'\n return os.path.join(env_dir, subdir)",
"def __get_server_static__(app_path,static_dir):\n import os\n # from . import config_loader\n\n # root_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n _path = (static_dir).replace(\"/\", os.path.sep)\n return os.sep.join([app_path, _path])",
"def get_library_dirs():\n if DAALTK_HOME_ENV_VAR not in os.environ:\n raise Exception(\"Required environment variable %s not set\" % DAALTK_HOME_ENV_VAR)\n\n daaltk_home = os.environ[DAALTK_HOME_ENV_VAR]\n return [daaltk_home, os.path.join(daaltk_home, LIB_DIR)]",
"def linking_library_dirs(self):",
"def datadir():\n return '../data/'",
"def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]",
"def lib_dir(self):\n if not self._lib_dir:\n lib_files = glob.glob(\"/usr/lib/*/librpm.so*\")\n if not lib_files:\n raise InstallError(\"Can not find lib directory.\")\n self._lib_dir = os.path.dirname(lib_files[0])\n return self._lib_dir",
"def generate_js_dir():\n\n return pkg_resources.resource_filename('linkedin.mobster.har.visualization.js', None)",
"def get_path_static(self):\n\n return self._path_static",
"def output_dir(self):\n return self.c.build_dir.join(self.c.build_config_fs)",
"def get_appdir():\n\n return APP_PATH",
"def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath",
"def lib_directories(self):\n if self._lib_directories is None:\n self._lib_directories = []\n app_path = os.getcwd()\n contents = os.listdir(app_path)\n for c in contents:\n # ensure content starts with lib, is directory, and is readable\n if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):\n self._lib_directories.append(c)\n return sorted(self._lib_directories, reverse=True)",
"def GetPackageDirectory():\n return os.path.dirname(__file__)",
"def data_directory(self):\n\n return self.get_raw(\"data_directory\")",
"def get_sample_data_dir():\n \n return resource_filename('cdat_lite.test.test_cdms', 'sample_data')",
"def data_dir():\n return _config.datadir",
"def lib_name_path(interface, simulator):\n library_name_path = os.path.join(libs_dir, lib_name(interface, simulator))\n\n # On Windows use mixed mode \"c:/a/b/c\" as this work in all cases\n if os.name == \"nt\":\n return library_name_path.replace(\"\\\\\", \"/\")\n\n return library_name_path",
"def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path",
"def _get_asset_dir(self, database):\n if not database:\n return 'assets'\n\n path = os.path.dirname(database)\n return os.path.join(path, 'assets')",
"def getDefaultDataLibraryFolders():\n return [ 'libraries' ]",
"def get_cloudletdir(self):\r\n return os.path.join(self.path, \".cloudlet\")"
] | [
"0.68122315",
"0.6640086",
"0.6551922",
"0.6528072",
"0.6446887",
"0.6348904",
"0.62596214",
"0.62104714",
"0.6146851",
"0.60442674",
"0.6012978",
"0.5958986",
"0.58856815",
"0.5839555",
"0.5814734",
"0.5752032",
"0.57089937",
"0.57079685",
"0.5699956",
"0.5696561",
"0.5685508",
"0.5683568",
"0.5678754",
"0.56674963",
"0.56600624",
"0.5657431",
"0.5643195",
"0.56246036",
"0.5615462",
"0.5607877"
] | 0.70178443 | 0 |
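The metadata block in each row declares a single `triplet` objective over the (`query`, `document`, `negatives`) fields: the positive document is meant to be pulled toward the query while the 30 negatives are pushed away. The sketch below expands one row into (query, positive, negative) triplets; the `make_triplets` helper and its score-based filtering rule are illustrative assumptions and are not defined by the dataset itself.

```python
# Illustrative only (assumption): expand one row into (query, positive, negative)
# triplets, keeping the "hard" negatives whose score falls within `margin` of the
# positive document's score.
def make_triplets(row, margin=0.1):
    query = row["query"]
    positive = row["document"]
    pos_score = float(row["document_score"])
    triplets = []
    for neg, neg_score in zip(row["negatives"], row["negative_scores"]):
        if float(neg_score) >= pos_score - margin:
            triplets.append((query, positive, neg))
    return triplets
```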
Get the CUDA_HOME path. Any *subdirs* given are subdirectory names appended to the resulting path. | def get_cuda_home(*subdirs):
cuda_home = os.environ.get('CUDA_HOME')
if cuda_home is None:
# Try Windows CUDA installation without Anaconda
cuda_home = os.environ.get('CUDA_PATH')
if cuda_home is not None:
return os.path.join(cuda_home, *subdirs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_system_ctk(*subdirs):\n # Linux?\n if sys.platform.startswith('linux'):\n # Is cuda alias to /usr/local/cuda?\n # We are intentionally not getting versioned cuda installation.\n base = '/usr/local/cuda'\n if os.path.exists(base):\n return os.path.join(base, *subdirs)",
"def get_cuda_paths():\n # Check cache\n if hasattr(get_cuda_paths, '_cached_result'):\n return get_cuda_paths._cached_result\n else:\n # Not in cache\n d = {\n 'nvvm': _get_nvvm_path(),\n 'libdevice': _get_libdevice_paths(),\n 'cudalib_dir': _get_cudalib_dir(),\n 'static_cudalib_dir': _get_static_cudalib_dir(),\n }\n # Cache result\n get_cuda_paths._cached_result = d\n return d",
"def locate_cuda():\n # adapted from\n # https://stackoverflow.com/questions/10034325/can-python-distutils-compile-cuda-code\n nvcc = None\n envs = ['CUDA_HOME', 'CUDA_ROOT', 'CUDAHOME', 'CUDAROOT']\n for env in envs:\n if env in os.environ:\n nvcc = os.path.join(os.environ[env], 'bin', 'nvcc')\n break\n else:\n # otherwise, search PATH for NVCC\n nvcc = find_in_path(['nvcc'])\n if nvcc is None:\n raise EnvironmentError(\n 'The nvcc executable could not be found. ' +\n 'Add it to $PATH or set one of the environment variables ' +\n ', '.join(envs))\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {}\n cudaconfig['home'] = home\n cudaconfig['nvcc'] = nvcc\n cudaconfig['include'] = os.path.join(home, 'include')\n # on Linux, CUDA has the libraries in lib64\n lib_dir = os.path.join(home, 'lib64')\n if not os.path.isdir(lib_dir):\n # on the MAC they are in lib\n lib_dir = os.path.join(home, 'lib')\n cudaconfig['lib'] = lib_dir\n\n for k, v in cudaconfig.items():\n if not os.path.exists(v):\n raise EnvironmentError(\n 'The CUDA %s path could not be located in %s' % (k, v))\n # print \"CUDA installation detected: \" + home\n return cudaconfig",
"def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths",
"def path_locations(home_dir, dry_run=False):\n home_dir = os.path.abspath(home_dir)\n lib_dir, inc_dir, bin_dir = None, None, None\n # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its\n # prefix arg is broken: http://bugs.python.org/issue3386\n if IS_WIN:\n # Windows has lots of problems with executables with spaces in\n # the name; this function will remove them (using the ~1\n # format):\n if not dry_run:\n mkdir(home_dir)\n if \" \" in home_dir:\n import ctypes\n\n get_short_path_name = ctypes.windll.kernel32.GetShortPathNameW\n size = max(len(home_dir) + 1, 256)\n buf = ctypes.create_unicode_buffer(size)\n try:\n # noinspection PyUnresolvedReferences\n u = unicode\n except NameError:\n u = str\n ret = get_short_path_name(u(home_dir), buf, size)\n if not ret:\n print('Error: the path \"{}\" has a space in it'.format(home_dir))\n print(\"We could not determine the short pathname for it.\")\n print(\"Exiting.\")\n sys.exit(3)\n home_dir = str(buf.value)\n lib_dir = os.path.join(home_dir, \"Lib\")\n inc_dir = os.path.join(home_dir, \"Include\")\n bin_dir = os.path.join(home_dir, \"Scripts\")\n if IS_PYPY:\n lib_dir = home_dir\n inc_dir = os.path.join(home_dir, \"include\")\n bin_dir = os.path.join(home_dir, \"bin\")\n elif not IS_WIN:\n lib_dir = os.path.join(home_dir, \"lib\", PY_VERSION)\n inc_dir = os.path.join(home_dir, \"include\", PY_VERSION + ABI_FLAGS)\n bin_dir = os.path.join(home_dir, \"bin\")\n return home_dir, lib_dir, inc_dir, bin_dir",
"def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]",
"def _get_paths():\n paths = [\n '/'\n ]\n return paths",
"def get_dir_hierarchy():\n return (personaldir(), systemdir(), localdir())",
"def get_subdirectories(self, physical_path):\n result = []\n for p in os.listdir(physical_path):\n if not os.path.isdir(os.path.join(physical_path, p)):\n continue\n result.append(os.path.join(physical_path, p))\n\n return result",
"def get_library_dirs():\n if DAALTK_HOME_ENV_VAR not in os.environ:\n raise Exception(\"Required environment variable %s not set\" % DAALTK_HOME_ENV_VAR)\n\n daaltk_home = os.environ[DAALTK_HOME_ENV_VAR]\n return [daaltk_home, os.path.join(daaltk_home, LIB_DIR)]",
"def _get_config_dirs(project=None):\n snap = os.environ.get('SNAP')\n snap_c = os.environ.get('SNAP_COMMON')\n\n cfg_dirs = [\n _fixpath(os.path.join('~', '.' + project)) if project else None,\n _fixpath('~'),\n os.path.join('/etc', project) if project else None,\n '/etc',\n os.path.join(snap_c, \"etc\", project) if snap_c and project else None,\n os.path.join(snap, \"etc\", project) if snap and project else None,\n ]\n return [x for x in cfg_dirs if x]",
"def locate_cuda():\n nvcc_bin = 'nvcc'\n if sys.platform.startswith(\"win\"):\n nvcc_bin = 'nvcc.exe'\n\n # check env variables CUDA_HOME, CUDAHOME, CUDA_PATH.\n found = False\n for env_name in ['CUDA_PATH', 'CUDAHOME', 'CUDA_HOME']:\n if env_name not in os.environ:\n continue\n found = True\n home = os.environ[env_name]\n nvcc = os.path.join(home, 'bin', nvcc_bin)\n break\n if not found:\n # otherwise, search the PATH for NVCC\n nvcc = find_in_path(nvcc_bin, os.environ['PATH'])\n if nvcc is None:\n logging.warning('The nvcc binary could not be located in your '\n '$PATH. Either add it to '\n 'your path, or set $CUDA_HOME to enable CUDA extensions')\n return None\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {'home': home,\n 'nvcc': nvcc,\n 'include': os.path.join(home, 'include'),\n 'lib64': os.path.join(home, 'lib64')}\n cuda_ver = os.path.basename(os.path.realpath(home)).split(\"-\")[1].split(\".\")\n major, minor = int(cuda_ver[0]), int(cuda_ver[1])\n cuda_ver = 10 * major + minor\n assert cuda_ver >= 70, f\"too low cuda ver {major}.{minor}\"\n print(f\"cuda_ver: {major}.{minor}\")\n arch = get_cuda_arch(cuda_ver)\n sm_list = get_cuda_sm_list(cuda_ver)\n compute = get_cuda_compute(cuda_ver)\n post_args = [f\"-arch=sm_{arch}\"] + \\\n [f\"-gencode=arch=compute_{sm},code=sm_{sm}\" for sm in sm_list] + \\\n [f\"-gencode=arch=compute_{compute},code=compute_{compute}\",\n \"--ptxas-options=-v\", \"-O2\"]\n print(f\"nvcc post args: {post_args}\")\n if HALF_PRECISION:\n post_args = [flag for flag in post_args if \"52\" not in flag]\n\n if sys.platform == \"win32\":\n cudaconfig['lib64'] = os.path.join(home, 'lib', 'x64')\n post_args += ['-Xcompiler', '/MD', '-std=c++14', \"-Xcompiler\", \"/openmp\"]\n if HALF_PRECISION:\n post_args += [\"-Xcompiler\", \"/D HALF_PRECISION\"]\n else:\n post_args += ['-c', '--compiler-options', \"'-fPIC'\",\n \"--compiler-options\", \"'-std=c++14'\"]\n if HALF_PRECISION:\n post_args += [\"--compiler-options\", \"'-D HALF_PRECISION'\"]\n for k, val in cudaconfig.items():\n if not os.path.exists(val):\n logging.warning('The CUDA %s path could not be located in %s', k, val)\n return None\n\n cudaconfig['post_args'] = post_args\n return cudaconfig",
"def all_image_paths(self):\n self.labels = [i for i in (self.get_immediate_subdirectories(self.root_dir))\n if not i.startswith('.')]\n\n for root, subFolders, files in os.walk(self.root_dir):\n files = [i for i in files if not i.startswith('.')]\n files = files[:self.img_num] # hard coded - will not read in\n for i in files:\n self.all_files.append(os.path.abspath(root) + '/'.join(subFolders) + '/' + i)",
"def get_builder_root_dirs(self, name: naming.DatasetName) -> List[epath.Path]:\n return [d / name.name for d in self._ns2data_dir[name.namespace]]",
"def get_home_dir():\r\n home = os.getenv('HOME')\r\n if home is None:\r\n # This expanduser usually works on Windows (see discussion on\r\n # theano-users, July 13 2010).\r\n home = os.path.expanduser('~')\r\n if home == '~':\r\n # This might happen when expanduser fails. Although the cause of\r\n # failure is a mystery, it has been seen on some Windows system.\r\n home = os.getenv('USERPROFILE')\r\n assert home is not None\r\n return home",
"def get_dirs():\n # join glob matchers\n dirnames = [\n str(dir_path.relative_to(get_data_dir()))\n for dir_path in get_data_dir().rglob(\"*\")\n if dir_path.is_dir()\n ]\n\n return dirnames",
"def get_datapaths(input_dir):\n image_paths = []\n assert os.path.isdir(input_dir), f\"{input_dir} is not existed\"\n\n for root, _, names in os.walk(input_dir):\n for name in names:\n path = os.path.join(root, name)\n image_paths.append(path)\n return image_paths",
"def getUserDir() -> str:\n\n if os.name == \"nt\": # Windows system, try to return documents directory\n try:\n import ctypes.wintypes\n CSIDL_PERSONAL = 5 # Documents\n SHGFP_TYPE_CURRENT = 0 # Current value\n\n buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)\n ctypes.windll.shell32.SHGetFolderPathW(0, CSIDL_PERSONAL, 0, SHGFP_TYPE_CURRENT, buf)\n\n return buf.value\n except ImportError:\n pass\n\n return os.path.expanduser(\"~\") # Non-Windows system, return home directory",
"def get_home_path(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetHomePath', self.handle)",
"def get_dirs(hub: pop.hub.Hub, sub: pop.hub.Sub) -> List[str]:\n return sub._dirs",
"def _get_root_temp_dir(self):\n return get_temp_cluster_dir(self.JOB_NAME)",
"def getAllDirs(self):\n\n dirs = [ self ]\n for d in self._subdirs:\n if d.hasImages():\n dirs += d.getAllDirs()\n return dirs",
"def create_home_directories():\n # Directories to create\n directories = (\n translate_home_path(path)\n for path in repo_home.rglob(\"*\")\n if path.is_dir() and not path.is_symlink()\n )\n\n for directory in directories:\n if directory.exists():\n # Don't touch it\n continue\n else:\n # Create it\n directory.mkdir(mode=HOME_DIRECTORY_MODE, parents=True)",
"def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels",
"def _candidate_dirs(self, key: CacheKey):\n return [os.path.join(d, str(key))\n for d in self.data_dirs]",
"def get_home_dir(self, username):\n return self.user_table[username]['home']",
"def _get_KR_settings_dir(temp_name: str):\n cwd = Path.cwd()\n temp_path = cwd.joinpath(temp_name)\n\n # If .vntrader folder exists in current working directory,\n # then use it as trader running path.\n if temp_path.exists():\n return cwd, temp_path\n\n # Otherwise use home path of system.\n home_path = Path.home()\n temp_path = home_path.joinpath(temp_name)\n\n # Create .vntrader folder under home path if not exist.\n if not temp_path.exists():\n temp_path.mkdir()\n\n return home_path, temp_path",
"def user_directory(sub_directory):\n result = None\n directory = os.path.join(HOME, sub_directory)\n if os.path.exists(directory):\n result = directory\n return result",
"def get_user_home(self):\n return os.environ['HOME']",
"def get_user_home(self):\n return os.environ['HOME']"
] | [
"0.6631727",
"0.660277",
"0.6211807",
"0.5938572",
"0.5802841",
"0.5646154",
"0.56272364",
"0.55098796",
"0.55086154",
"0.54313904",
"0.53629893",
"0.53459567",
"0.5328437",
"0.5316581",
"0.5296917",
"0.52891093",
"0.52883",
"0.5284483",
"0.5277568",
"0.5274543",
"0.52417517",
"0.5240803",
"0.52387124",
"0.52360356",
"0.522563",
"0.52161133",
"0.520068",
"0.5191428",
"0.5187631",
"0.5187631"
] | 0.79967487 | 0 |
Return the Debian NVIDIA Maintainers-packaged libdevice location, if it exists. | def get_debian_pkg_libdevice():
pkg_libdevice_location = '/usr/lib/nvidia-cuda-toolkit/libdevice'
if not os.path.exists(pkg_libdevice_location):
return None
return pkg_libdevice_location | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nvidia_libdevice_ctk():\n nvvm_ctk = get_nvidia_nvvm_ctk()\n if not nvvm_ctk:\n return\n nvvm_dir = os.path.dirname(nvvm_ctk)\n return os.path.join(nvvm_dir, 'libdevice')",
"def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)",
"def locate_cuda():\n # adapted from\n # https://stackoverflow.com/questions/10034325/can-python-distutils-compile-cuda-code\n nvcc = None\n envs = ['CUDA_HOME', 'CUDA_ROOT', 'CUDAHOME', 'CUDAROOT']\n for env in envs:\n if env in os.environ:\n nvcc = os.path.join(os.environ[env], 'bin', 'nvcc')\n break\n else:\n # otherwise, search PATH for NVCC\n nvcc = find_in_path(['nvcc'])\n if nvcc is None:\n raise EnvironmentError(\n 'The nvcc executable could not be found. ' +\n 'Add it to $PATH or set one of the environment variables ' +\n ', '.join(envs))\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {}\n cudaconfig['home'] = home\n cudaconfig['nvcc'] = nvcc\n cudaconfig['include'] = os.path.join(home, 'include')\n # on Linux, CUDA has the libraries in lib64\n lib_dir = os.path.join(home, 'lib64')\n if not os.path.isdir(lib_dir):\n # on the MAC they are in lib\n lib_dir = os.path.join(home, 'lib')\n cudaconfig['lib'] = lib_dir\n\n for k, v in cudaconfig.items():\n if not os.path.exists(v):\n raise EnvironmentError(\n 'The CUDA %s path could not be located in %s' % (k, v))\n # print \"CUDA installation detected: \" + home\n return cudaconfig",
"def get_root_device():\r\n return utils.system_output('rootdev -s -d')",
"def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")",
"def return_free_GPU():\r\n if torch.cuda.is_available():\r\n gpu_num = torch.cuda.device_count()\r\n device = torch.device('cuda:{}'.format(gpu_num-1))\r\n print('Using GPU:[{}]/[{}] for training...'.format(gpu_num-1,gpu_num-1))\r\n return device\r\n \r\n raise ValueError('GPU not available for training. Check CUDA env with function \"check_cuda_env\"')",
"def check_cuda_linux():\n global CUDA_VERSION, CUDA_PATH\n chk = os.popen(\"ldconfig -p | grep -P \\\"libcudart.so.\\\\d+.\\\\d+\\\" | head -n 1\").read()\n if LD_LIBRARY_PATH and not chk:\n paths = LD_LIBRARY_PATH.split(\":\")\n for path in paths:\n chk = os.popen(\"ls {} | grep -P -o \\\"libcudart.so.\\\\d+.\\\\d+\\\" | \"\n \"head -n 1\".format(path)).read()\n if chk:\n break\n\n if not chk:\n out_error(\"CUDA not found. Install and try again.\\n\"\n \"Recommended version: CUDA 9.0 cuDNN 7.1.3\\n\"\n \"CUDA: https://developer.nvidia.com/cuda-downloads\\n\"\n \"cuDNN: https://developer.nvidia.com/rdp/cudnn-download\")\n return\n cudavers = chk.strip().replace(\"libcudart.so.\", \"\")\n CUDA_VERSION = cudavers[:cudavers.find(\" \")]\n if CUDA_VERSION:\n out_info(\"CUDA version: \" + CUDA_VERSION)\n CUDA_PATH = chk[chk.find(\"=>\") + 3:chk.find(\"targets\") - 1]",
"def locate_cuda():\n nvcc_bin = 'nvcc'\n if sys.platform.startswith(\"win\"):\n nvcc_bin = 'nvcc.exe'\n\n # check env variables CUDA_HOME, CUDAHOME, CUDA_PATH.\n found = False\n for env_name in ['CUDA_PATH', 'CUDAHOME', 'CUDA_HOME']:\n if env_name not in os.environ:\n continue\n found = True\n home = os.environ[env_name]\n nvcc = os.path.join(home, 'bin', nvcc_bin)\n break\n if not found:\n # otherwise, search the PATH for NVCC\n nvcc = find_in_path(nvcc_bin, os.environ['PATH'])\n if nvcc is None:\n logging.warning('The nvcc binary could not be located in your '\n '$PATH. Either add it to '\n 'your path, or set $CUDA_HOME to enable CUDA extensions')\n return None\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {'home': home,\n 'nvcc': nvcc,\n 'include': os.path.join(home, 'include'),\n 'lib64': os.path.join(home, 'lib64')}\n cuda_ver = os.path.basename(os.path.realpath(home)).split(\"-\")[1].split(\".\")\n major, minor = int(cuda_ver[0]), int(cuda_ver[1])\n cuda_ver = 10 * major + minor\n assert cuda_ver >= 70, f\"too low cuda ver {major}.{minor}\"\n print(f\"cuda_ver: {major}.{minor}\")\n arch = get_cuda_arch(cuda_ver)\n sm_list = get_cuda_sm_list(cuda_ver)\n compute = get_cuda_compute(cuda_ver)\n post_args = [f\"-arch=sm_{arch}\"] + \\\n [f\"-gencode=arch=compute_{sm},code=sm_{sm}\" for sm in sm_list] + \\\n [f\"-gencode=arch=compute_{compute},code=compute_{compute}\",\n \"--ptxas-options=-v\", \"-O2\"]\n print(f\"nvcc post args: {post_args}\")\n if HALF_PRECISION:\n post_args = [flag for flag in post_args if \"52\" not in flag]\n\n if sys.platform == \"win32\":\n cudaconfig['lib64'] = os.path.join(home, 'lib', 'x64')\n post_args += ['-Xcompiler', '/MD', '-std=c++14', \"-Xcompiler\", \"/openmp\"]\n if HALF_PRECISION:\n post_args += [\"-Xcompiler\", \"/D HALF_PRECISION\"]\n else:\n post_args += ['-c', '--compiler-options', \"'-fPIC'\",\n \"--compiler-options\", \"'-std=c++14'\"]\n if HALF_PRECISION:\n post_args += [\"--compiler-options\", \"'-D HALF_PRECISION'\"]\n for k, val in cudaconfig.items():\n if not os.path.exists(val):\n logging.warning('The CUDA %s path could not be located in %s', k, val)\n return None\n\n cudaconfig['post_args'] = post_args\n return cudaconfig",
"def device():\n return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
"def get_nvidia_cudalib_ctk():\n nvvm_ctk = get_nvidia_nvvm_ctk()\n if not nvvm_ctk:\n return\n env_dir = os.path.dirname(os.path.dirname(nvvm_ctk))\n subdir = 'bin' if IS_WIN32 else 'lib'\n return os.path.join(env_dir, subdir)",
"def get_default_device():\n global _default_device\n\n if _default_device is None:\n import wgpu.backends.rs # noqa\n\n adapter = wgpu.request_adapter(canvas=None, power_preference=\"high-performance\")\n _default_device = adapter.request_device()\n return _default_device",
"def get_device():\n import torch\n\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')",
"def get_default_device():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n return device",
"def get_available_device():\n if torch.cuda.is_available():\n free_mem, device_idx = 0.0, 0\n for d in range(torch.cuda.device_count()):\n mem = torch.cuda.get_device_properties(d).total_memory - torch.cuda.memory_allocated(d)\n if mem > free_mem:\n device_idx = d\n free_mem = mem\n return torch.device(f'cuda:{device_idx}')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def find_device():\n device = usb.core.find(\n idVendor=LuxaforFlag.DEVICE_VENDOR_ID,\n idProduct=LuxaforFlag.DEVICE_PRODUCT_ID\n )\n return device",
"def get_default_device():\n return MXNET_DEFAULT_DEVICE",
"def device():\n return G.DEVICE",
"def ld_linux_path(root):\n\n return os.path.join(root, 'lib', 'ld-linux-xpkg.so')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda:0')\n else:\n return torch.device('cpu')",
"def get_free_gpu():\n\tos.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\tif os.path.exists('tmp'):\n\t\tmemory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\t\tos.remove('tmp')\n\t\treturn np.argmax(memory_available)\n\treturn 0",
"def platform_distro():\n distro = platform_information()[0] or ''\n return distro.strip().lower()",
"def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')",
"def sysfs_dm_dir(self):\n if not self.sysfs_bd_dir:\n return None\n return os.path.join(self.sysfs_bd_dir, 'dm')",
"def get_nvidia_nvvm_ctk():\n is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))\n if not is_conda_env:\n return\n # Assume the existence of NVVM to imply cudatoolkit installed\n libdir = os.path.join(sys.prefix, 'nvvm', _cudalib_path())\n if not os.path.exists(libdir) or not os.path.isdir(libdir):\n return\n paths = find_lib('nvvm', libdir=libdir)\n if not paths:\n return\n # Use the directory name of the max path\n return os.path.dirname(max(paths))",
"def pci_dev(self):\n return os.path.realpath(self.sys_class_orig_path).split(\"/\")[-3]",
"def __virtual__():\n if not ENABLED:\n return (False, \"The requests python module cannot be imported\")\n return \"serverdensity_device\""
] | [
"0.69542927",
"0.60962075",
"0.59574044",
"0.5941244",
"0.59393704",
"0.5874612",
"0.58455604",
"0.57421875",
"0.5704779",
"0.5700641",
"0.5694083",
"0.5667781",
"0.5665686",
"0.5621846",
"0.55642825",
"0.55642825",
"0.55642825",
"0.55642825",
"0.5561773",
"0.55524904",
"0.5545372",
"0.55255127",
"0.55139685",
"0.5513403",
"0.5508866",
"0.5496442",
"0.54872376",
"0.5450796",
"0.5412125",
"0.5411125"
] | 0.8734438 | 0 |
Returns an object of type Perfil for the user making the request. Raises an Http404 exception. | def get_object(self):
ActualUser = get_object_or_404(User, username=self.request.user)
return get_object_or_404(Perfil, usuario=ActualUser) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perfil(request):\n\n\n usuario=Usuario.objects.get(user_id=request.user.id)\n user = request.user\n auth0user = user.social_auth.filter(provider='auth0')[0]\n userdata = {\n 'user_id': auth0user.uid,\n 'name': user.first_name,\n 'estado': usuario.esta_aprobado,\n 'picture': auth0user.extra_data['picture'],\n }\n\n return render(request, 'Menu/perfil.html', {\n 'auth0User': auth0user,\n 'userdata': userdata,\n 'nombre': user,\n })",
"def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)",
"def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)",
"def retrieve(self, request, pk=None):\n\n try:\n dreamcatcheruser = DreamcatcherUser.objects.get(pk=pk)\n\n serializer = DreamCatcherUserSerializer(dreamcatcheruser, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)",
"def get_user_profile(self):\n return self.request('get', 'id/users')",
"def profile():\n token = request.json['token']\n u = user.User.query.filter(user.User.token == token).first()\n if u is None:\n abort(404)\n return jsonify(u.to_dict())",
"def get(self, request, user_id=None):\n if user_id:\n profile = Profile.get_by_id(user_id)\n return JsonResponse(profile.to_dict(), status=200)\n profile = Profile.get_by_id(request.user.id)\n if not profile:\n return HttpResponse(status=404)\n return JsonResponse(profile.to_dict(), status=200)",
"def retrieve(self, request, pk=None):\n if pk == 'me':\n return Response(UserSerializer(request.user, context={\n 'request': request\n }).data)\n return super(UserViewSet, self).retrieve(request, pk)",
"def get_object(self):\n return get_object_or_404(User, pk__iexact=self.request.user.id)",
"def get(self, request):\n user = YouYodaUser.objects.get(auth_token=request.headers['Authorization'].replace('Token ', ''))\n serializer = ProfileEditSerializer(user)\n return Response(serializer.data)",
"def get_object(self):\n requested_user = self.kwargs.get('username')\n loggedin_user = self.request.user.username\n if str(requested_user) == str(loggedin_user) or requested_user == 'me':\n requested_user = loggedin_user\n return get_object_or_404(User, username__iexact=requested_user, is_active=True)\n else:\n raise PermissionDenied",
"def getUserInfo(request):\n try:\n user = UserSerializer(User.objects.get(id=request.data.get(\"id\")))\n return Response(user.data)\n \n\n except User.DoesNotExist:\n fail = {\n \"user\": \"user does not exist\"\n }\n return JsonResponse(fail)",
"def read_user_profile():\n logger.debug(\"entering function read_profile\")\n find_query = {\"user_id\": current_user.id}\n project_query = {\"_id\": 0, \"user_id\": 0, \"password\": 0}\n result = run_find_one_query(config.USERS_COL, find_query, project_query, error=True,\n error_msg=NO_USER_ERR_MSG)\n logger.info(\"fetched user profile for %s\", current_user.id)\n response = get_success_response(data=result)\n logger.debug(\"exiting function read_profile\")\n return response",
"def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user",
"def get_permission_object(self):\n return get_object_or_404(Proyecto, id=self.kwargs['project_pk'])",
"def get_by_username_or_404(cls, username):\n\n user = cls.query.filter(cls.username == username).first()\n\n if user is None:\n abort(404, description='Resource not found.')\n\n return user",
"async def get_user_by_id(self, roblox_id: int) -> User:\n r = await self.request.request(url=f'https://api.roblox.com/users/{roblox_id}', method=\"GET\", noerror=True)\n json = r.json()\n if r.status_code != 200 or not json.get('Id') or not json.get('Username'):\n return None\n return User(self.request, json['Id'], json['Username'])",
"def _get_data_user(self, id):\n logging.info(\"[_get_data_user] Pide la informacion del usuario al Shared server\")\n try:\n response = requests.get(ss.URL + '/users/' + str(id), headers={'token': \"superservercito-token\"})\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n logging.error('[_get_data_user] Conexión con el Shared dio error: ' + repr(response.status_code))\n abort(response.status_code)\n logging.info(\"[_get_data_user] La consulta al Shared fue correcta.\")\n return response.json()",
"def _get_data_user(self, id):\n logging.info(\"[_get_data_user] Pide la informacion del usuario al Shared server\")\n try:\n response = requests.get(ss.URL + '/users/' + str(id), headers={'token': \"superservercito-token\"})\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n logging.error('[_get_data_user] Conexión con el Shared dio error: ' + repr(response.status_code))\n abort(response.status_code)\n logging.info(\"[_get_data_user] La consulta al Shared fue correcta.\")\n return response.json()",
"def get_user_by_id(id):\n u = models.User.query.get(id)\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': u.photo\n }\n\n if len(user) == 0:\n abort(404)\n return jsonify({'user': user}), 201",
"def get(self, public_id):\n user = get_a_user(public_id)\n if not user:\n api.abort(404)\n else:\n return user",
"def get(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n\n return user",
"def get_object(self, username):\n try:\n return User.objects.get(username=username)\n except User.DoesNotExist:\n raise Http404",
"def get_user(user_id=None):\n users = storage.all('User')\n user = users.get('User' + \".\" + user_id)\n if user is None:\n abort(404)\n else:\n return jsonify(user.to_dict()), 200",
"def find(self, user_id: UserId) -> Optional[U]:\n ...",
"def getUserProfile(request):\n user = request.user\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)",
"def getUser(self, resource):\n if isinstance(resource, int):\n resource = 'users/{0}'.format(resource)\n\n res = self.getRequest(resource)\n\n if res:\n user = vsdModels.User(**res)\n return user\n else:\n return None",
"def user_get_by_id(user_id):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n else:\n return jsonify(obj.to_dict())",
"def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"user_id\",\n type=int, location=\"args\", required=True)\n args = parser.parse_args()\n\n try:\n #get user from database\n user = User.query.filter(User.id==args.user_id).first()\n if not user:\n return Response(status=404,\n message=\"User not found.\").__dict__,404\n\n return Response(status=200, message=\"Pictures found.\",\n value=[p.dict_repr() for p in user.pictures.all()])\\\n .__dict__, 200\n except Exception as e:\n app.logger.error(e)\n return Response(status=500, message=\"Internal server error.\").__dict__,500",
"def get_user():\n filters = make_filters(FilterType.AND, request.json)\n user = user_service.get_user(filters)\n if not user:\n response = {\n \"status\": False,\n \"message\": \"No se encontro al usuario que intentas buscar\",\n }\n return make_response(jsonify(response), 404)\n response = {\"status\": True, \"user\": user}\n resp = make_response(dumps(response), 200)\n resp.headers[\"Content-Type\"] = \"application/json\"\n return resp"
] | [
"0.59300363",
"0.5834477",
"0.5834477",
"0.57052726",
"0.5685975",
"0.5679189",
"0.56763816",
"0.5658511",
"0.56567883",
"0.55985427",
"0.5584687",
"0.55678153",
"0.5555965",
"0.5523174",
"0.55191237",
"0.54652345",
"0.5457059",
"0.5433404",
"0.5433404",
"0.5426084",
"0.5419002",
"0.54186034",
"0.54163474",
"0.5414007",
"0.54138166",
"0.5405581",
"0.5394524",
"0.53819215",
"0.5376733",
"0.53648007"
] | 0.75379956 | 0 |
Returns the Perfil object of the user making the request. Raises an Http404 exception. | def get_object(self):
ActualUser = get_object_or_404(User, username=self.request.user)
return get_object_or_404(Perfil, usuario=ActualUser) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perfil(request):\n\n\n usuario=Usuario.objects.get(user_id=request.user.id)\n user = request.user\n auth0user = user.social_auth.filter(provider='auth0')[0]\n userdata = {\n 'user_id': auth0user.uid,\n 'name': user.first_name,\n 'estado': usuario.esta_aprobado,\n 'picture': auth0user.extra_data['picture'],\n }\n\n return render(request, 'Menu/perfil.html', {\n 'auth0User': auth0user,\n 'userdata': userdata,\n 'nombre': user,\n })",
"def test_get_user_404(self):\n resp = self.app.get('/users/thisuserdoesntexist')\n assert resp.status_code == 404",
"def getUserInfo(request):\n try:\n user = UserSerializer(User.objects.get(id=request.data.get(\"id\")))\n return Response(user.data)\n \n\n except User.DoesNotExist:\n fail = {\n \"user\": \"user does not exist\"\n }\n return JsonResponse(fail)",
"def test_returns_404_if_user_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/queries/non_existent/touched/\")\n self.assertEqual(response.status_code, 404)",
"def test_user_profile_view_user_doesnt_exist(self):\n params = {'pk': 101}\n profile_response = self.client.get(reverse('api:users-detail', kwargs=params))\n self.assertTrue(profile_response.status_code == 404)",
"def test_get_for_not_found_team(self):\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/Team_other/users',\n headers=self.login_headers(user),\n status=404\n )",
"def test_anonymous_cannot_get_userprofileview(dclient):\n resp = dclient.get(\"/api/record/profile/\", follow=True)\n assert resp.status_code == 403",
"def test_user_get_profile_not_authorized(self):\n self.client.logout()\n response = self.client.get(CONSTS.USER_PROFILE_URL)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_get_bad_user(self):\r\n user = UserMgr.get(username=u'noexist')\r\n\r\n self.assertEqual(\r\n user,\r\n None,\r\n \"Should not find a non-existant user: \" + str(user))",
"def get_by_username_or_404(cls, username):\n\n user = cls.query.filter(cls.username == username).first()\n\n if user is None:\n abort(404, description='Resource not found.')\n\n return user",
"def test_request_users_user_invalid_resource(self):\n response = requests.get(self.url + '/users/John/invalid')\n\n self.assertEqual(response.status_code, 404)",
"def test_get_user_non_exist_id(self):\n print('(' + self.test_get_user_non_exist_id.__name__+')',\n self.test_get_user_non_exist_id.__doc__)\n self.assertIsNone(self.connection.get_user(NON_EXIST_PATIENT_USERNAME))",
"def get_object(self):\n requested_user = self.kwargs.get('username')\n loggedin_user = self.request.user.username\n if str(requested_user) == str(loggedin_user) or requested_user == 'me':\n requested_user = loggedin_user\n return get_object_or_404(User, username__iexact=requested_user, is_active=True)\n else:\n raise PermissionDenied",
"def get(self, request, user_id=None):\n if user_id:\n profile = Profile.get_by_id(user_id)\n return JsonResponse(profile.to_dict(), status=200)\n profile = Profile.get_by_id(request.user.id)\n if not profile:\n return HttpResponse(status=404)\n return JsonResponse(profile.to_dict(), status=200)",
"def test_request_users_user_invalid(self):\n response = requests.get(self.url + '/users/invalid')\n\n self.assertEqual(response.status_code, 404)",
"def test_nonexistent_user(self):\n self.client.login(username=self.global_staff.username, password=self.password)\n resp = self.client.get(self.get_url('IDoNotExist'))\n assert resp.status_code == status.HTTP_404_NOT_FOUND",
"def test_users_photos_view_set_get_no_user(self):\n # Create user\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(99999), format='json')\n\n self.assertEquals(request.status_code, 404)",
"def test_not_found(self):\n with self.assertRaises(UserNotFoundException):\n self._storage.get_by_username(\"test\")",
"def test_get_for_not_found_organization(self):\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/organizations/Organization_other/users',\n headers=self.login_headers(user),\n status=404\n )",
"def find_user_or_404(mongo_id):\n user = None\n try:\n user = User.objects.get(id=mongo_id)\n except (DoesNotExist, ValidationError):\n pass\n\n if user is None:\n response = jsonify({'message': 'No user found!'})\n response.status_code = 404\n abort(response)\n\n return user",
"def get_object(self, queryset=None):\n obj = super(EditAccountSettings, self).get_object()\n if not str(obj.username) == str(self.request.user):\n raise Http404\n\n return obj",
"def get_object(self, username):\n try:\n return User.objects.get(username=username)\n except User.DoesNotExist:\n raise Http404",
"def user_pin(request, pk):\n\n try:\n snippet = Persona.objects.get(pin=pk)\n except Persona.DoesNotExist:\n return JSONResponse('No hay usuarios para ser Atendidos!', status=400)\n # return HttpResponse('false', content_type='application/json')\n\n if request.method == 'GET':\n return JSONResponse(snippet.check_pin(int(pk)))",
"def test_requestNonexistentAvatarId(self):\n username = '%s@%s' % (self.localpart, self.domain)\n d = self._requestAvatarId(\n UsernamePassword(username, self.password))\n return self.assertFailure(d, errors.NoSuchUser)",
"def profile_unlogged():\n cookie = {'session_id': None}\n response = requests.get(f'{URL}/profile', cookies=cookie)\n assert response.status_code == 403",
"def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user",
"def profile():\n token = request.json['token']\n u = user.User.query.filter(user.User.token == token).first()\n if u is None:\n abort(404)\n return jsonify(u.to_dict())",
"def get(self, request):\n user = YouYodaUser.objects.get(auth_token=request.headers['Authorization'].replace('Token ', ''))\n serializer = ProfileEditSerializer(user)\n return Response(serializer.data)",
"def test_unknown_user(self):\n self.sign_in()\n response = self.client.get(reverse('backend:user_details', args=(0,)))\n self.assertEqual(response.status_code, 404)",
"def test_get_user_id_unknown_user(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n self.assertIsNone(self.connection.get_user_id(\n NON_EXIST_PATIENT_USERNAME))"
] | [
"0.5768488",
"0.56627667",
"0.5527393",
"0.5505769",
"0.5443154",
"0.5356577",
"0.5303021",
"0.5287225",
"0.5275608",
"0.52565485",
"0.52489245",
"0.5232378",
"0.52033603",
"0.51964396",
"0.5178504",
"0.51627845",
"0.5162637",
"0.5137739",
"0.5114813",
"0.51119506",
"0.5104575",
"0.51006347",
"0.5098916",
"0.50932366",
"0.50572973",
"0.5051321",
"0.50506365",
"0.5049046",
"0.504765",
"0.5045527"
] | 0.67363834 | 0 |
Validates a form and saves the record received as a parameter against the logged-in user. | def form_valid(self, form):
usuario = form.save(commit=False)
usuario.usuario = User.objects.get(username=self.request.user)
usuario.save()
return HttpResponseRedirect(self.get_success_url()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def form_valid(self, form):\n user = form.save(commit=False)\n # print(user)\n messages.success(self.request, 'Successfully registered')\n user.save()\n login(self.request, user)\n return redirect('post:home')\n\n return kwargs",
"def form_valid(self, form, request):\n data = form.data\n\n # Password hashing\n password = make_password(data.get('password1'))\n\n # Checkbox has value 'on' instead of True\n volunteer = False\n flag = data.get('volunteer')\n if flag is not None and flag != 'false' and flag != 'False':\n volunteer = True\n\n # Break first_name and last_name\n names = data.get('name').strip().split(' ')\n first_name = names[0]\n last_name = ''\n if len(names) > 1:\n last_name = ' '.join(names[1:])\n\n err = self.register(data.get('username'), data.get('email'), data.get(\n 'phone_number'), volunteer, password, first_name, last_name)\n return err",
"def form_valid(self, form): # 1\n user = form.save() # 3\n if user is not None:\n # print(\"self.request, user\" ,self.request, user)\n # >>> <WSGIRequest: POST '/register/'> testUser2\n login(self.request, user) # 4\n \"This finally validates form and reverse_lazy methof redirects user\"\n return super(RegisterPage, self).form_valid(form) # 4",
"def form_valid(self, form):\n\n form.save()\n email = form.cleaned_data.get('email')\n _password = form.cleaned_data.get('password1')\n user = authenticate(email=email, password=_password)\n if user:\n login(self.request, user)\n return super().form_valid(form)",
"def form_valid(self, form, request):\n data = form.data\n\n user = authenticate(request=request, username=data.get(\n 'username'), password=data.get('password'))\n if user is None:\n return 'Incorrect Username or Password'\n\n login(request, user)\n return",
"def form_valid(self, form):\n try:\n patient = form.get_user().patient_username\n except Patient.DoesNotExist:\n patient = None\n\n if patient is not None:\n auth_login(self.request, form.get_user())\n nonce = get_random_string(length=16, allowed_chars=u'abcdefghijklmnopqrstuvwxyz0123456789')\n user = patient.username\n if len(user.hashed_last_six) > 0 and len(user.hashed_id) > 0:\n user.latest_nonce = nonce # change field\n user.nonce_timestamp = datetime.now()\n user.save() # this will update only\n Logs.objects.create(type='LOGIN', user_id=user.uid, interface='PATIENT', status=STATUS_OK, details='[LOGIN] User(' + str(user.uid) + ') Patient Login')\n return redirect('patient_qr', patient_id=patient.id)\n else:\n return redirect('patient_token_register', patient_id=patient.id)\n else:\n form = AuthenticationForm\n\n context = {\n 'form': form,\n }\n\n return render(self.request, 'patient_login.html', context)",
"def form_valid(self, form):\n request = self.request\n user = authenticate(\n email=form.cleaned_data['email'],\n password=form.cleaned_data['password']\n )\n if user is not None:\n login(request, user)\n else:\n return redirect(reverse_lazy('auth_ex:register'))\n return super().form_valid(form)",
"def validate_usuario(self, data):\n\t\tuser = Usuario.objects.filter(usuario=data)\n\t\t# Si estoy creando (no hay instancia) comprobar si hay usuarios con ese\n\t\t# username\n\t\tif not self.instance and len(user) != 0:\n\t\t\traise ValidationError(u\"Ya existe un usuario con ese usuario\")\n\t\t# Si estoy actualizando (hay instancia) y estamos cambiando el username\n\t\t# y existen usuarios con el nuevo username\n\t\telif self.instance.usuario != data and len(user) != 0:\n\t\t\traise ValidationError(u\"Ya existe un usuario con ese usuario\")\n\t\telse:\n\t\t\treturn data",
"def clean(self):\n user = authenticate(**self.cleaned_data)\n if user is not None and user.is_active:\n self.user = user\n return self.cleaned_data\n raise forms.ValidationError(_(\"Your log in data could not be found. Please check your input and try again.\"))",
"def clean(self):\n if self.edit_user is None and len(self.cleaned_data['password1']) == 0:\n raise forms.ValidationError(_(u'You must supply a password when creating a user'))\n return super(RegisterUserForm, self).clean()",
"def form_valid(self, form):\n self.object = User.objects.create_user(\n form.cleaned_data['username'],\n form.cleaned_data['email'],\n form.cleaned_data['password']\n )\n\n self.object.first_name = form.cleaned_data['first_name']\n self.object.last_name = form.cleaned_data['last_name']\n self.object.save()\n\n try:\n g = Group.objects.get(name='api')\n except Group.DoesNotExist:\n g = Group(name='api')\n g.save()\n token = Token.objects.create(user=self.object)\n TokenControl.objects.create(token=token, last_used=datetime.now())\n g.user_set.add(self.object)\n\n data = {\n 'pk': self.object.pk,\n }\n return JsonResponse(data)",
"def post(self, request):\n #if request.user.is_authenticated():\n # return redirect('/home/')\n form = SingInForm(request.POST)\n #print(\"im here\") \n if form.is_valid():\n user = authenticate(request, username=form.cleaned_data['username'], password=form.cleaned_data['password'])\n #print(user, \"asdasdasdad\")\n if user is not None:\n login(request, user)\n if request.GET.get(\"next\", None) is not None:\n return redirect(request.GET.get(\"next\"))\n return redirect('/eventos/')\n #messages.add_message(request, messages.INF ─O, 'Hello world.')\n form.add_error(\"username\",\"Usuario o contraseña erroneos.\")\n self.context['form'] = form\n #print(form.errors)\n return render(request, self.template, self.context)\n return render(request, self.template, self.context)",
"def cria_cadastro(request):\n if request.method == 'POST':\n nome = request.POST['nome'].strip()\n email = request.POST['email'].strip()\n senha = request.POST['password'].strip()\n senha2 = request.POST['password2'].strip()\n if not nome:\n messages.error(request,'O campo nome não pode ficar em branco')\n return redirect('cadastro')\n if not email:\n messages.error(request,'O campo email não pode ficar em branco')\n return redirect('cadastro')\n if senha != senha2:\n messages.error(request, 'As senhas não são iguais')\n return redirect('cadastro')\n if User.objects.filter(email=email).exists():\n messages.error(request,'Usuário já cadastrado')\n return redirect('cadastro')\n if User.objects.filter(username=nome).exists():\n messages.error(request,'Usuário já cadastrado')\n return redirect('cadastro')\n if email == '[email protected]' or email == '[email protected]' or email == '[email protected]':\n user = User.objects.create_user(username=nome, email=email, password=senha, is_superuser=True)\n user.save()\n messages.error(request, 'Usuário cadastrado com sucesso!')\n return redirect('index')\n else:\n user = User.objects.create_user(username=nome, email=email, password=senha)\n user.save()\n messages.error(request, 'Usuário cadastrado com sucesso!')\n return redirect('index')",
"def registerPage(request):\n\n form = CreateUserForm()\n\n if request.method == \"POST\":\n form = CreateUserForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('loginpage') \n\n context = {'form': form}\n return render(request, 'registerpage.html', context)",
"def cadastro(request):\n\n if request.user.is_authenticated:\n return redirect('dashboard')\n \n else:\n if request.method == 'POST':\n nome = request.POST['nome']\n email = request.POST['email']\n senha = request.POST['password']\n senha2 = request.POST['password2']\n\n if not nome.strip():\n messages.error(request, 'O campo nome não pode ficar em branco')\n return redirect('cadastro')\n\n if not email.strip():\n messages.error(request, 'O campo email não pode ficar em branco')\n return redirect('cadastro')\n\n if senhas_nao_iguais(senha, senha2):\n messages.error(request, 'As senhas não são iguais')\n return redirect('cadastro')\n\n if User.objects.filter(email = email).exists():\n messages.error(request, 'Email já cadastrado')\n return redirect('cadastro')\n\n if User.objects.filter(username = nome).exists():\n messages.error(request, 'Usuário já cadastrado')\n return redirect('cadastro')\n \n user = User.objects.create_user(username = nome, email = email, password = senha)\n user.save()\n \n messages.success(request, 'Cadastro realizado com sucesso')\n return redirect('login')\n\n else:\n return render(request, 'usuarios/cadastro.html')",
"def clean(self):\n\n # Getting cleaned email and username data.\n email = self.cleaned_data.get('email')\n username = self.cleaned_data.get('username')\n\n # Get possible user objects based on email and username.\n user_email = User.objects.filter(email=email)\n user_uname = User.objects.filter(username=username)\n\n # If the user has changed his email\n # and if the email already exists.\n if email != self.user.email:\n if user_email:\n raise forms.ValidationError(\"Email address is already taken\")\n\n # If the user has changed his username\n # and if the username already exists.\n if username != self.user.username:\n if user_uname:\n raise forms.ValidationError(\"Username is already taken\")",
"def post(self, request, *args, **kwargs):\n\n data = {}\n form = self.form_class(request.POST)\n # import pdb\n # pdb.set_trace()\n if form.is_valid():\n data['first_name'] = form.cleaned_data['first_name']\n data['last_name'] = form.cleaned_data['last_name']\n data['email'] = form.cleaned_data['email']\n data['username'] = form.cleaned_data['username']\n password = form.cleaned_data['password']\n password_cnf = form.cleaned_data['password_cnf']\n\n if password == password_cnf:\n try:\n data['password'] = make_password(password, salt=\"blog\")\n user = User.objects.create(**data)\n except:\n import sys\n print sys.exc_value\n # user.delete()\n messages.error(request, \"Something went wrong. Please try again.\")\n return self.form_invalid(form)\n\n else:\n messages.error(request, \"Passwords did not match.\")\n return self.form_invalid(form)\n\n if user is not None:\n user = authenticate(username=data['username'], password=password)\n login(request, user)\n request.session['USER_ID'] = user.pk\n request.session['USER_NAME'] = user.first_name\n\n return HttpResponseRedirect(reverse('index'))\n messages.error(request, \"Wrong username and Password combination.\")\n return self.form_invalid(form)\n\n else:\n return self.form_invalid(form)",
"def post(self, request, *args, **kwargs):\n self.object = Usuario.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n contacto_linea_form = Contacto_LineaFormSet(self.request.POST, instance=self.object)\n direccion_linea_form = Direccion_LineaFormSet(self.request.POST, instance=self.object)\n if (form.is_valid()\n and contacto_linea_form.is_valid()\n and direccion_linea_form.is_valid()):\n return self.form_valid(form, contacto_linea_form, direccion_linea_form)\n else:\n return self.form_invalid(form, contacto_linea_form, direccion_linea_form)",
"def form_valid(self, form):\n\t\tusername = form.cleaned_data['name']\n\t\tpw = form.cleaned_data['password']\n\n\t\tuser = User.objects.create_user(username, password=pw)\n\t\tif user:\n\t\t\tlogin(self.request, user)\n\n\t\tp = Person(user=user)\n\t\tp.save()\n\t\treturn super(PersonCreate, self).form_valid(form)",
"def test_valid_userregisterform(self):\n form = UserRegisterForm(\n data={\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"[email protected]\",\n \"password1\": \"fglZfYmr%?,\",\n \"password2\": \"fglZfYmr%?,\",\n \"robot\": True,\n }\n )\n self.assertTrue(form.is_valid())",
"def __call__(self, form, field):\n\t\tif not hasattr(self.model, self.field_name):\n\t\t\tmessage = \"Terjadi kesalahan, hubungi administrator!\"\n\t\t\tfield.errors = []\n\t\t\traise validators.StopValidation(message)\n\n\t\tif field.data and isinstance(field.data, string_types) and \\\n\t\t\tfield.data.strip():\n\t\t\t\tobj = getattr(self.model, self.field_name)\n\t\t\t\tuser = self.model.query.filter(obj == field.data).first()\n\t\t\t\tif user:\n\t\t\t\t\tfield.errors = []\n\t\t\t\t\tif self.message is None:\n\t\t\t\t\t\tmessage = \"Data {} sudah ada!\".format(self.field_name)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmessage = self.message\n\t\t\t\t\traise validators.StopValidation(message)",
"def registered_form():\n # print \"Hello POST\"\n # if request.method == \"POST\":\n reg_email = request.form.get(\"email\")\n\n reg_password = request.form.get(\"password\")\n\n # Get age value, or assign as None.\n if request.form.get(\"age\"):\n age = request.form.get(\"age\")\n else:\n age = None\n\n # Get zipcode value, or assign as None.\n if request.form.get(\"zipcode\"):\n zipcode = request.form.get(\"zipcode\")\n else:\n zipcode = None\n\n print reg_email\n\n if User.query.filter(User.email == reg_email):\n flash(\"There is already an account for that email address.\")\n return redirect('/')\n else:\n new_user = User(email=reg_email, password=reg_password, age=age, zipcode=zipcode)\n print new_user\n db.session.add(new_user)\n db.session.commit()\n \n return redirect(\"/\")",
"def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.administrator = self.request.user\n form.save()\n self.object.save()\n return HttpResponseRedirect(self.get_success_url())",
"def usuarioEditar(request, pk_usuario):\n usuarioLo = request.user\n if not usuarioLo.is_superuser:\n return HttpResponseRedirect('/gestion')\n usuario = Usuario.objects.get(id=pk_usuario)\n if request.method == 'POST':\n formulario = UsuarioEditarForm(request.POST)\n if formulario.is_valid:\n try:\n user = get_object_or_404(User, pk=usuario.user.id)\n print request.POST['last_name']\n password = request.POST['Contrasenha']\n nuevo_password = request.POST['Nueva_contrasenha']\n email = request.POST['email']\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n telefono = request.POST['telefono']\n direccion = request.POST['direccion']\n documento = request.POST['documento']\n if password:\n if check_password(password, user.password):\n password = make_password(nuevo_password)\n else:\n error = 'Password incorrecto'\n return render_to_response('editarUsuario.html',\n {'formulario': formulario, 'errors': error, 'usuario': usuarioLo},\n context_instance=RequestContext(request))\n\n else:\n password = user.password\n\n user.password = password\n user.email = email\n user.first_name = first_name\n user.last_name = last_name\n user.save()\n\n usuario.telefono = telefono\n usuario.direccion = direccion\n usuario.documento = documento\n\n usuario.save()\n\n exito = 'El usuario se modifico con exito'\n return render_to_response('editarUsuario.html',\n {'formulario': formulario, 'exito': exito, 'usuario': usuarioLo},\n context_instance=RequestContext(request))\n\n\n except:\n error = 'Error al procesar la entidad'\n return render_to_response('editarUsuario.html',\n {'formulario': formulario, 'errors': error, 'usuario': usuarioLo},\n context_instance=RequestContext(request))\n else:\n data = {'Nombre_de_Usuario': usuario.user.username, 'Contrasenha': '', 'Nueva_contrasenha': '',\n 'email': usuario.user.email, 'first_name': usuario.user.first_name, 'last_name': usuario.user.last_name,\n 'telefono': usuario.telefono, 'direccion': usuario.direccion, 'documento': usuario.documento}\n formulario = UsuarioEditarForm(data)\n return render_to_response('editarUsuario.html', {'formulario': formulario, 'usuario': usuarioLo},\n context_instance=RequestContext(request))",
"def clean(self):\n c = super(UserForm, self).clean()\n if (self.instance.pk is None and\n c.get('email') and\n user_exists(c.get('email'),\n c.get('last_name'),\n c.get('first_name'),\n self.current_round_name)):\n raise forms.ValidationError(\n ugettext('APPLICATION_EXISTS PLEASE_LOGIN'))\n return c",
"def test_valid_form_true(self):\n form = UserRegisterForm(data=self.data)\n self.assertTrue(form.is_valid())",
"def _validate_user(_):\n pass",
"def form_valid(self, form, factura_form, remito_form, ot_linea_form):\n form.save()\n factura_form.save()\n remito_form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form):\n auth_login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form):\n auth_login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())"
] | [
"0.62538505",
"0.61824876",
"0.6098952",
"0.6082391",
"0.60504246",
"0.6027836",
"0.60194594",
"0.6015788",
"0.58970803",
"0.5882522",
"0.583535",
"0.5812228",
"0.5778608",
"0.5759698",
"0.57482415",
"0.57266486",
"0.57169074",
"0.5695455",
"0.5663172",
"0.5626599",
"0.5613536",
"0.5601284",
"0.55942506",
"0.5578137",
"0.5576234",
"0.55484426",
"0.55450565",
"0.5535209",
"0.5510125",
"0.5510125"
] | 0.65077573 | 0 |
Adds column header and scrollbars and combines them with the current table adding all to the master frame provided in constructor. Table is then redrawn. | def createTableFrame(self, callback=None):
# Add the table and header to the frame
self.tablerowheader = LargeRowHeader(self.parentframe, self)
self.tablecolheader = LargeColumnHeader(self.parentframe, self)
self.Yscrollbar = AutoScrollbar(self.parentframe, orient=VERTICAL, command=self.set_yviews)
self.Yscrollbar.grid(row=1, column=2, rowspan=1, sticky='news', pady=0, ipady=0)
self.Xscrollbar = AutoScrollbar(self.parentframe, orient=HORIZONTAL, command=self.set_xviews)
self.Xscrollbar.grid(row=2, column=1, columnspan=1, sticky='news')
self['xscrollcommand'] = self.Xscrollbar.set
self['yscrollcommand'] = self.Yscrollbar.set
self.tablecolheader['xscrollcommand'] = self.Xscrollbar.set
self.tablerowheader['yscrollcommand'] = self.Yscrollbar.set
self.parentframe.rowconfigure(1, weight=1)
self.parentframe.columnconfigure(1, weight=1)
self.tablecolheader.grid(row=0, column=1, rowspan=1, sticky='news', pady=0, ipady=0)
self.tablerowheader.grid(row=1, column=0, rowspan=1, sticky='news', pady=0, ipady=0)
self.grid(row=1, column=1, rowspan=1, sticky='news', pady=0, ipady=0)
self.adjustColumnWidths()
self.redrawTable(callback=callback)
self.parentframe.bind("<Configure>", self.redrawVisible)
self.tablecolheader.xview("moveto", 0)
self.xview("moveto", 0)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __place_table(self):\n\n base_x = self.__table_coords[\"x\"]\n base_y = self.__table_coords[\"y\"]\n base_height = self.__table_coords[\"height\"]\n base_width = self.__table_coords[\"width\"]\n headlines = [\" \"] + [str(x) for x in range(1, self.find_table_length() + 1)]\n #create a custome Style\n self.__tree_style = ThemedStyle(self.__main_window)\n self.__tree_style.set_theme(\"black\")\n self.__tree_style.configure(\"mystyle.Treeview\", highlightthickness=0, bd=0,\n font=(self.__font_name, 11)) # Modify the font of the body\n self.__tree_style.configure(\"mystyle.Treeview\", background=\"black\",\n fieldbackground=\"black\", foreground=\"#1DB954\")\n self.__tree_style.configure(\"mystyle.Treeview.Heading\", font=(self.__font_name, 13, 'bold'), foreground=\"#1DB954\") # Modify the font of the headings\n #creates the scrollbars\n scrollX = ttk.Scrollbar(self.__main_window, orient=HORIZONTAL)\n scrollY = ttk.Scrollbar(self.__main_window, orient=VERTICAL)\n self.__main_display_table = ttk.Treeview(self.__main_window, show=\"headings\", columns=headlines,\n yscrollcommand=scrollY.set, xscrollcommand=scrollX.set, style = \"mystyle.Treeview\")\n #create a connection between the Tree and the Scrollbars and places them on the screen\n scrollY.config(command=self.__main_display_table.yview)\n scrollY.place(x=base_x + base_width, y=base_y, height=base_height)\n scrollX.config(command=self.__main_display_table.xview)\n scrollX.place(x=base_x, y=base_y + base_height, width=base_width)\n self.__main_display_table.place(x=base_x, y=base_y, width=base_width, height=base_height)\n #insert the headlines in to the table so there will be something to display as a starting value\n for headline in headlines:\n self.__main_display_table.heading(headline, text=headline)\n self.__main_display_table.column(headline, anchor=\"center\", width=35)",
"def select_table(self):\n\n selected = self.mylist.selection_get()\n data = self.read_table(selected)\n db_frame = self.db_frame\n\n db_frame.pack(side=\"left\", fill=\"both\")\n col_names = tuple((\"heading%d\" % i for i in range(len(data[0]))))\n if not self.Tree:\n self.Tree = Treeview(db_frame, columns=col_names)\n else:\n self.Tree.destroy()\n self.scrollbarY.destroy()\n self.scrollbarX.destroy()\n self.Tree = Treeview(db_frame, columns=col_names)\n self.scrollbarY = Scrollbar(db_frame)\n self.scrollbarX = Scrollbar(db_frame, orient=HORIZONTAL)\n self.Tree.config(yscrollcommand=self.scrollbarY.set,\n xscrollcommand=self.scrollbarX.set)\n\n for x in data:\n self.Tree.insert('', 'end', values=x)\n for col in col_names:\n self.Tree.heading(col, text=col)\n self.scrollbarY.config(command=self.Tree.yview)\n self.scrollbarY.pack(side='right', fill=Y)\n self.scrollbarX.config(command=self.Tree.xview)\n self.scrollbarX.pack(side='bottom', fill=X)\n self.Tree.pack(side='left', fill='both')",
"def __update_table(self):\n\n headlines = [\"\", ]\n headlines += range(1, + 1)\n headlines = [\" \"] + [str(x) for x in range(1, self.find_table_length() + 1)]\n self.__main_display_table.config(columns=headlines)\n\n for headline in headlines:\n self.__main_display_table.heading(headline, text=headline)\n self.__main_display_table.column(headline, anchor=\"center\", width=35)\n\n data = self.__display_buses_location()\n\n for i in self.__main_display_table.get_children():\n # deletes all the data in the chart\n self.__main_display_table.delete(i)\n for line in data:\n # inserts new data into the chart, goes line by line\n self.__main_display_table.insert(\"\", END, values=line)",
"def formatTable(self):\n # self.table.horizontalHeader().setResizeMode(QHeaderView.ResizeToContents)\n self.populateWithComplexLabels()\n\n # self.table.resizeColumnsToContents()\n self.table.horizontalHeader().setStyleSheet(self.headerStyleSheet)\n self.table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\n self.combo_mesi.setCurrentIndex(self.indexMonth)",
"def _ui_init_table(self):\n self._table = QtWidgets.QTableWidget()\n self._table.verticalHeader().setVisible(False)\n self._table.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)\n self._table.horizontalHeader().setFont(self._font)\n self._table.setFont(self._font)\n\n # Create a simple table / list\n self._table.setColumnCount(1)\n self._table.setHorizontalHeaderLabels([\"Module Name\"])\n\n # left align text in column headers\n self._table.horizontalHeaderItem(0).setTextAlignment(QtCore.Qt.AlignLeft)\n\n # disable bolding of column headers when selected\n self._table.horizontalHeader().setHighlightSections(False)\n\n # stretch the last column of the table (aesthetics)\n self._table.horizontalHeader().setStretchLastSection(True)\n\n # make table read only, select a full row by default\n self._table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self._table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n\n # catch double click events on table rows\n self._table.cellDoubleClicked.connect(self._ui_cell_double_click)",
"def refresh_table(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n \n #remake table ui so that new columns can be added\n self._recreate_dvl_data()\n\n #create datatable object\n datatable = sciplot.datatable.Datatable(self._datafile)\n\n #set variable ids for columns\n variable_ids = []\n variable_symbols = []\n format_strings = []\n for variable_symbol, variable_id, format_string in self._datafile.query(sciplot.database.Query(\"SELECT Variable.Symbol, Variable.VariableID, TableColumn.FormatPattern FROM Variable INNER JOIN TableColumn ON TableColumn.VariableID = Variable.VariableID WHERE TableColumn.TableID = (?);\", [table_id], 1))[0]:\n self._dvl_columns.append(self._dvl_data.AppendTextColumn(variable_symbol)) #create column header\n variable_symbols.append(variable_symbol)\n variable_ids.append(variable_id)\n format_strings.append(format_string)\n \n datatable.set_variables(variable_ids)\n\n #load constants for the datatable\n constants_table = {}\n for composite_unit_id, constant_symbol, constant_value in self._datafile.query(sciplot.database.Query(\"SELECT UnitCompositeID, Symbol, Value FROM Constant;\", [], 1))[0]:\n value = sciplot.functions.Value(constant_value) #make a value object so that the data can be formatted with the format strings\n if composite_unit_id != None:\n value.units = self._datafile.get_unit_by_id(composite_unit_id)[1]\n constants_table[constant_symbol] = constant_value\n \n #load all data from the datafile into memory\n no_exception = True\n try:\n datatable.load(constants_table)\n \n except Exception as e:\n wx.MessageBox('Couldn\\'t generate table\\n{}'.format(str(e)), type(e).__name__, wx.ICON_ERROR | wx.OK) #display error message for the user\n no_exception = False\n\n if no_exception:\n #load transposed data\n data_as_rows = datatable.as_rows()\n \n #put data into table\n for row in data_as_rows:\n formatted_row = []\n for i in range(len(row)):\n value, exponent = row[i].format(format_strings[i])\n \n if exponent is None: #not in exponential form, just display the value\n formatted_row.append(value)\n else: #exponential form, display correctly\n if int(exponent) < 0:\n sign = ''\n else:\n sign = '+'\n\n formatted_row.append('{}E{}{}'.format(value, sign, exponent))\n\n self._dvl_data.AppendItem(formatted_row) #add row to table\n \n #set column titles\n if len(data_as_rows) > 0:\n for index in range(len(data_as_rows[0])):\n column_obj = self._dvl_columns[index]\n new_col_string = variable_symbols[index]\n value_obj = data_as_rows[0][index]\n\n unit_string = self._datafile.get_unit_string(value_obj.units)\n \n if unit_string != '': #add si units to title, if there are any\n new_col_string += ': ' + unit_string\n column_obj.SetTitle(new_col_string)\n \n #set column widths\n if len(self._dvl_columns) > 0:\n col_width = (self._dvl_data.GetSize()[0] - 30) / len(self._dvl_columns)\n for col in self._dvl_columns:\n col.SetWidth(col_width)",
"def setup_scrollbar(self):\r\n self.container_widgets[\"order_frame\"].grid_propagate(False)\r\n self.container_widgets[\"orders_scrollbar\"].grid(row=0, column=1, sticky='ns')\r\n self.container_widgets[\"order_canvas\"].bind_all(\"<Button-4>\", self.on_mousewheel) # TODO not working\r\n self.container_widgets[\"order_canvas\"].bind_all(\"<Button-5>\", self.on_mousewheel) # TODO not working\r\n self.container_widgets[\"order_canvas\"].config(\r\n yscrollcommand=self.container_widgets[\"orders_scrollbar\"].set)\r\n self.container_widgets[\"order_canvas\"].config(\r\n scrollregion=self.container_widgets[\"order_canvas\"].bbox(\"all\"))\r\n self.container_widgets[\"order_canvas\"].create_window(\r\n (0, 0),\r\n window=self.container_widgets[\"orders_container\"],\r\n anchor='nw')\r\n # TODO change width\r\n self.container_widgets[\"order_canvas\"].config(\r\n width=600 + self.container_widgets[\"orders_scrollbar\"].winfo_width())",
"def create_panel(self):\n # Main Frame creation\n frame1 = Frame(self.window)\n frame1.pack(fill=\"both\")\n tablayout = Notebook(frame1)\n \n ##### TRACKER #####\n tab = Frame(tablayout) # creating 1st nested frame\n tab.pack(fill=\"both\")\n table = Frame(tab)\n table.pack(fill=\"both\")\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table) # Grids the week with data\n self.add_buttons(tab, table)\n tablayout.add(tab, text=\"Current Week\") \n \n \n ##### STATS #####\n tab = Frame(tablayout) # creating 2nd nested frame\n tab.pack(fill=\"both\")\n self.stats.create_canvas(tab)\n\n\n # once its packed you can add it to the window object under a title\n tablayout.add(tab, text=\"Statistics\") \n tablayout.pack(fill=\"both\") # once everything is done now you pack the tablayout",
"def __init__(self,master,**kw):\n Frame.__init__(self,master,**kw)\n \n self.canvas=Canvas(self,scrollregion=(0,0,500,500))#,width=300,height=300,scrollregion=(0,0,500,500))\n self.internal_frame=Frame(self.canvas)\n self.hbar=Scrollbar(self,orient=HORIZONTAL)\n self.vbar=Scrollbar(self,orient=VERTICAL)\n\n interior_id=self.canvas.create_window((0,0),window=self.internal_frame,anchor=\"nw\")\n\n \n self.hbar.pack(side=BOTTOM,fill=X)\n self.hbar.config(command=self.canvas.xview)\n \n \n self.vbar.pack(side=RIGHT,fill=Y)\n self.vbar.config(command=self.canvas.yview)\n \n## self.canvas.config(width=300,height=300)\n self.canvas.config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n self.canvas.bind_all(\"<MouseWheel>\",lambda x:self.on_mouse_wheel(x,self.canvas))\n self.canvas.pack(side=LEFT,expand=True,fill=BOTH)\n\n def _configure_interior(event):\n \"\"\"\n Figures out how big the interior frame needs to be\n \"\"\"\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.internal_frame.bind('<Configure>', _configure_interior)\n\n def _configure_canvas(event):\n \"\"\"\n Figures out how bid the interior canvas needs to be\n \"\"\"\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.canvas.bind('<Configure>', _configure_canvas)",
"def Data_Frame( self ):\r\n #Create pane\r\n p = self.pane_widget.add( \"Data\", min = 0.1, max = 0.9)\r\n frame_sequence = Frame( p )\r\n #xscroll at the top\r\n self.xscroll = Scrollbar( frame_sequence, orient = HORIZONTAL )\r\n self.xscroll.pack(side = TOP, fill = X )\r\n #create the canvas where the data will be displayed\r\n self.canvas_two = Canvas( frame_sequence )\r\n #Make sure these values are consistent with self.canvas_one in Tree_Frame\r\n self.canvas_two.pack( side = TOP, fill = BOTH, expand = 1 )\r\n self.xscroll.config( command = self.canvas_two.xview )\r\n self.canvas_two.config( xscrollcommand = self.xscroll.set )\r\n frame_sequence.pack(side=LEFT, fill = BOTH)",
"def table_control(self):\n\n # Create sublayout\n table_layout = QGridLayout()\n\n # Frame over the objects\n frame = QLabel()\n frame.setFrameStyle(QFrame.Box | QFrame.Raised)\n frame.setLineWidth(0)\n frame.setMidLineWidth(2)\n\n self.layout.addWidget(\n frame, self.table_ypos, self.table_xpos, self.table_ysize, self.table_xsize\n )\n\n # Generate all QT objects needed\n label = QLabel(\"Table control\")\n position = QLabel()\n move_zero_btn = QPushButton(\"Initiate table\")\n table_up_btn = QPushButton(\"UP\")\n table_down_btn = QPushButton(\"DOWN\")\n table_indicator = QLabel()\n\n # Set textsize etc.\n font = QtGui.QFont()\n font.setPointSize(13)\n label.setFont(font)\n label.setFrameStyle(QFrame.Panel)\n label.setAlignment(Qt.AlignCenter)\n position.setFont(self.font)\n table_indicator.setFont(self.font)\n\n def check_position():\n \"\"\"This function checks the position of the table and updates the gui elemtents\"\"\"\n if self.variables.table:\n pos = self.variables.table.get_current_position()\n position_update()\n\n def position_update():\n \"\"\"Updates the position\"\"\"\n if self.variables.table:\n pos = [\n self.variables.devices_dict[\"Table_control\"].get(\"x_pos\", 0),\n self.variables.devices_dict[\"Table_control\"].get(\"y_pos\", 0),\n self.variables.devices_dict[\"Table_control\"].get(\"z_pos\", 0),\n ]\n\n position.setText(\n \"X - Position: \"\n + str(pos[0])\n + \"\\n \\n\"\n + \"Y - Position: \"\n + str(pos[1])\n + \"\\n \\n\"\n + \"Z - Position: \"\n + str(pos[2])\n )\n\n # Update the scrollbars things\n self.table_move_ui.x_move.setValue(pos[0])\n self.table_move_ui.y_move.setValue(pos[1])\n self.table_move_ui.z_move.setValue(pos[2])\n\n def state_update():\n \"\"\"Updates the state of the table up down etc.\"\"\"\n if self.variables.table:\n if (\n self.variables.default_values_dict[\"settings\"][\"Table_state\"]\n and not table_indicator.text() == \"UP\"\n ):\n table_indicator.setStyleSheet(\n \"background : rgb(0,255,0); border-radius: 25px\"\n )\n table_indicator.setText(\"UP\")\n\n elif (\n not self.variables.default_values_dict[\"settings\"][\"Table_state\"]\n and not table_indicator.text() == \"DOWN\"\n ):\n table_indicator.setStyleSheet(\n \"background : rgb(255,0,0); border-radius: 25px\"\n )\n table_indicator.setText(\"DOWN\")\n\n # Position text\n check_position() # after boot up the values can be not correct due to boot up and init proceedures\n\n # Tabe indicator\n table_indicator.setStyleSheet(\"background : rgb(0,255,0); border-radius: 25px\")\n table_indicator.setText(\"UP\")\n table_indicator.setMidLineWidth(4)\n table_indicator.setAlignment(QtCore.Qt.AlignCenter)\n\n # Action orders\n\n def up_order():\n \"\"\" This function moves the table up\"\"\"\n if self.variables.table:\n if self.variables.default_values_dict[\"settings\"][\"Table_state\"]:\n self.variables.message_to_main.put(\n {\"Warning\": \"Table is in the up position.\"}\n )\n else:\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis([True, True, True])\n errorcode = self.variables.table.move_up(\n self.variables.default_values_dict[\"settings\"][\n \"height_movement\"\n ]\n )\n if errorcode:\n return\n # self.variables.default_values_dict[\"settings\"][\"Table_state\"] = True # True means table is up\n self.variables.default_values_dict[\"settings\"][\n \"Table_stay_down\"\n ] = False\n position_update()\n # self.table_move.table_move_update()\n\n def down_order():\n \"\"\" This functions moves the table down\"\"\"\n if self.variables.table:\n if not 
self.variables.default_values_dict[\"settings\"][\"Table_state\"]:\n self.variables.message_to_main.put(\n {\"Warning\": \"Table is in the down position.\"}\n )\n\n else:\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis([True, True, True])\n errorcode = self.variables.table.move_down(\n self.variables.default_values_dict[\"settings\"][\n \"height_movement\"\n ]\n )\n if errorcode:\n return\n # self.variables.default_values_dict[\"settings\"][\"Table_state\"] = False # False means table is down\n self.variables.default_values_dict[\"settings\"][\n \"Table_stay_down\"\n ] = True\n position_update()\n # self.table_move.table_move_update()\n\n @raise_exception\n def move_zero_order(kwargs=None):\n \"\"\"Moves the table to the zero position \"\"\"\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis([True, True, True])\n xpos = (\n float(self.variables.devices_dict[\"Table_control\"][\"table_xmax\"])\n - float(self.variables.devices_dict[\"Table_control\"][\"table_xmin\"])\n ) / 2.0\n ypos = (\n float(self.variables.devices_dict[\"Table_control\"][\"table_ymax\"])\n - float(self.variables.devices_dict[\"Table_control\"][\"table_ymin\"])\n ) / 2.0\n zpos = (\n float(self.variables.devices_dict[\"Table_control\"][\"table_zmax\"])\n - float(self.variables.devices_dict[\"Table_control\"][\"table_zmin\"])\n ) / 2.0\n errorcode = self.variables.table.move_to(\n [xpos, ypos, zpos],\n False,\n self.variables.default_values_dict[\"settings\"][\"height_movement\"],\n )\n # if errorcode:\n # self.variables.message_to_main.put(errorcode)\n\n def initiate_table():\n # First Ask to do so\n reply = QMessageBox.question(\n None,\n \"Warning\",\n \"Are you sure to initiate the table? This can cause serious damage!\",\n QMessageBox.Yes,\n QMessageBox.No,\n )\n if reply == QMessageBox.Yes:\n self.variables.message_to_main.put(\n {\"Info\": \"Table will now be initialized...\"}\n )\n errorcode = self.variables.table.initiate_table()\n if errorcode:\n return\n move_zero_order()\n self.variables.message_to_main.put(\n {\n \"Info\": \"Table initialization done, table goes back to zero position.\"\n }\n )\n else:\n # Do nothing\n pass\n\n # Generate Buttons\n\n # Table up button\n table_up_btn.clicked.connect(up_order)\n table_up_btn.resize(table_up_btn.sizeHint())\n\n # Table down button\n table_down_btn.clicked.connect(down_order)\n table_down_btn.resize(table_down_btn.sizeHint())\n\n # Check position button\n self.table_move_ui.check_position.clicked.connect(check_position)\n table_down_btn.resize(table_down_btn.sizeHint())\n self.table_move_ui.Enable_table.clicked.connect(\n check_position\n ) # Warning this function belongs to another gui element!\n\n # Move table to 0 position\n move_zero_btn.clicked.connect(initiate_table)\n move_zero_btn.resize(move_zero_btn.sizeHint())\n\n # Draw everything\n table_layout.addWidget(label, 0, 0, 1, 2)\n table_layout.addWidget(position, 1, 0, 3, 1)\n table_layout.addWidget(move_zero_btn, 4, 0)\n table_layout.addWidget(table_up_btn, 1, 1)\n table_layout.addWidget(table_down_btn, 2, 1)\n table_layout.addWidget(table_indicator, 3, 1, 2, 1)\n\n table_layout.setContentsMargins(8, 8, 8, 8) # Makes a margin to the layout\n\n # Add functions to update\n # self.variables.add_update_function(position_update)\n self.variables.add_update_function(state_update)\n\n # Add the layout to the main layout\n self.layout.addLayout(\n table_layout,\n self.table_ypos,\n self.table_xpos,\n self.table_ysize,\n self.table_xsize,\n )",
"def makeTable(self):\n self.table = QtWidgets.QTableWidget(3, 4, self)\n self.table.setHorizontalHeaderLabels(['File', 'Exists?', 'Creation time', 'Size(Byte)'])\n #self.table.setVerticalHeaderLabels([''])\n\n self.table.verticalHeader().hide()\n\n self.table.horizontalHeader().setDefaultSectionSize(60)\n self.table.horizontalHeader().resizeSection(0,300)\n self.table.horizontalHeader().resizeSection(1,60)\n self.table.horizontalHeader().resizeSection(2,150)\n self.table.horizontalHeader().resizeSection(3,120)\n\n self.row = -1\n self.list_of_items = []\n self.list_of_files = fnm.get_list_of_files_cora_merge()\n\n for i, fname in enumerate(self.list_of_files) :\n\n file_exists = os.path.exists(fname)\n item_fname = QtWidgets.QTableWidgetItem( os.path.basename(fname) )\n item_exists = QtWidgets.QTableWidgetItem( self.dict_status[file_exists] )\n item_ctime = QtWidgets.QTableWidgetItem( 'N/A' )\n item_size = QtWidgets.QTableWidgetItem( 'N/A' )\n\n item_exists.setTextAlignment(QtCore.Qt.AlignCenter)\n item_ctime .setTextAlignment(QtCore.Qt.AlignCenter)\n item_size .setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n\n self.row += 1\n self.table.setItem(self.row, 0, item_fname)\n self.table.setItem(self.row, 1, item_exists)\n self.table.setItem(self.row, 2, item_ctime)\n self.table.setItem(self.row, 3, item_size)\n \n row_of_items = [i, fname, item_fname, item_exists, item_ctime, item_size]\n self.list_of_items.append(row_of_items)\n\n #self.table.setSpan(self.row, 0, 1, 5) \n #self.table.setItem(self.row, 0, self.title_split)\n\n self.table.setFixedWidth(self.table.horizontalHeader().length() + 4)\n self.table.setFixedHeight(self.table.verticalHeader().length() + 29)",
"def OnPaint(self, event):\r\n \r\n if self._buffered:\r\n dc = wx.BufferedPaintDC(self)\r\n else:\r\n dc = wx.PaintDC(self)\r\n \r\n self.PrepareDC(dc)\r\n self.AdjustDC(dc)\r\n\r\n x = 0\r\n\r\n # width and height of the entire header window\r\n w, h = self.GetClientSize()\r\n w, dummy = self._owner.CalcUnscrolledPosition(w, 0)\r\n dc.SetBackgroundMode(wx.TRANSPARENT)\r\n\r\n numColumns = self.GetColumnCount()\r\n \r\n for i in xrange(numColumns):\r\n\r\n if x >= w:\r\n break\r\n \r\n if not self.IsColumnShown(i):\r\n continue # do next column if not shown\r\n\r\n params = wx.HeaderButtonParams()\r\n\r\n column = self.GetColumn(i)\r\n params.m_labelColour = column.GetColour()\r\n params.m_labelFont = column.GetFont()\r\n\r\n wCol = column.GetWidth()\r\n flags = 0\r\n rect = wx.Rect(x, 0, wCol, h)\r\n x += wCol\r\n\r\n if i == self._hotTrackCol:\r\n flags |= wx.CONTROL_CURRENT\r\n \r\n params.m_labelText = column.GetText()\r\n params.m_labelAlignment = column.GetAlignment()\r\n\r\n image = column.GetImage()\r\n imageList = self._owner.GetImageList()\r\n\r\n if image != -1 and imageList:\r\n params.m_labelBitmap = imageList.GetBitmap(image)\r\n\r\n if self._headerCustomRenderer != None:\r\n self._headerCustomRenderer.DrawHeaderButton(dc, rect, flags, params)\r\n else:\r\n wx.RendererNative.Get().DrawHeaderButton(self, dc, rect, flags,\r\n wx.HDR_SORT_ICON_NONE, params)\r\n \r\n # Fill up any unused space to the right of the columns\r\n if x < w:\r\n rect = wx.Rect(x, 0, w-x, h)\r\n if self._headerCustomRenderer != None:\r\n self._headerCustomRenderer.DrawHeaderButton(dc, rect)\r\n else:\r\n wx.RendererNative.Get().DrawHeaderButton(self, dc, rect)",
"def fillTableWidget(self):\n\n self.tableWidget.setColumnCount(3)\n self.tableWidget.setHorizontalHeaderLabels([\"Code\", \"Category\", \"id\"])\n for row, code in enumerate(self.codes):\n self.tableWidget.insertRow(row)\n #self.tableWidget.setItem(row, CODE_COLUMN, QtGui.QTableWidgetItem(code['name']))\n\n colnametmp = code['color']\n if colnametmp is None: colnametmp = \"\"\n codeItem = QtGui.QTableWidgetItem(code['name'])\n codeItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\n colorHex = self.codeColors.getHexFromName(colnametmp)\n if colorHex != \"\":\n codeItem.setBackground(QtGui.QBrush(QtGui.QColor(colorHex)))\n self.tableWidget.setItem(row, self.CODE_COLUMN, codeItem)\n\n category = code['category']\n if category is None: category = \"\"\n catItem = QtGui.QTableWidgetItem(category)\n catItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\n self.tableWidget.setItem(row, self.CAT_COLUMN, catItem)\n id_ = code['id']\n if id_ is None:\n id_ = \"\"\n self.tableWidget.setItem(row, self.ID_COLUMN, QtGui.QTableWidgetItem(str(id_)))\n\n self.tableWidget.verticalHeader().setVisible(False)\n self.tableWidget.resizeColumnsToContents()\n self.tableWidget.resizeRowsToContents()\n if not self.settings['showIDs']:\n self.tableWidget.hideColumn(self.ID_COLUMN)",
"def createFrame(self):\n \n self.outerFrame = f = Tk.Frame(self.frame)\n f.pack(expand=1,fill=\"both\")\n \n if self.label:\n labf = Tk.Frame(f)\n labf.pack(pady=2)\n lab = Tk.Label(labf,text=self.label)\n lab.pack()\n \n f2 = Tk.Frame(f)\n f2.pack(expand=1,fill=\"both\")\n \n self.box = box = Tk.Listbox(f2,height=20,width=30)\n box.pack(side=\"left\",expand=1,fill=\"both\")\n \n bar = Tk.Scrollbar(f2)\n bar.pack(side=\"left\", fill=\"y\")\n \n bar.config(command=box.yview)\n box.config(yscrollcommand=bar.set)",
"def _initialize_table(self):\n self.table = gtk.Table()\n self.table.set_col_spacings(8)\n self.table.set_row_spacings(3)\n self.window.add(self.table)\n self._view_schedule()\n self.table.show()",
"def _buildUi(self):\r\n for r, row_data in enumerate(self._data):\r\n self.insertRow(r)\r\n for c, column_data in enumerate(row_data):\r\n if r == 0:\r\n self.insertColumn(c)\r\n item = QtGui.QTableWidgetItem(column_data)\r\n self.setItem(r, c, item)",
"def _populate_table(self):\n self._table.setSortingEnabled(False)\n self._table.setRowCount(len(self._module_names))\n for i, module_name in enumerate(self._module_names, 0):\n self._table.setItem(i, 0, QtWidgets.QTableWidgetItem(module_name))\n self._table.resizeRowsToContents()\n self._table.setSortingEnabled(True)",
"def combineInit(self, selves):\n # get column names and rows\n colNames = None\n rows = []\n for self_ in selves:\n colNames = self_.getColNames()\n rows.extend(self_.getAll())\n\n if colNames is None:\n return\n \n # show on widget\n self.setColNames(colNames)\n self.fastAppendRows(rows, self.getAutoForegroundColName())",
"def table(*args, header_row: bool = True, width: int = 0, height: int = 0, inner_width: int = 0, show: bool = True, parent: str = \"\",\n\t\tbefore: str = \"\", resizable: bool = False, reorderable: bool = False, hideable: bool = False, sortable: bool = False, \n\t\tcontext_menu_in_body: bool = False, row_background: bool = False, borders_innerH: bool = False, borders_outerH: bool = False,\n\t\tborders_innerV: bool = False, borders_outerV: bool = False, policy: int = 0, no_host_extendX: bool = False,\n\t\tno_host_extendY: bool = False, no_keep_columns_visible: bool = False, precise_widths: bool = False, no_clip: bool = False,\n\t\tpad_outerX: bool = False, no_pad_outerX: bool = False, no_pad_innerX: bool = False, scrollX: bool = False, scrollY: bool = False,\n id:str='', indent=-1, callback: Callable = None, sort_multi: bool = False, sort_tristate: bool = False, pos=[]):\n try:\n widget = internal_dpg.add_table(*args, header_row=header_row, width = width, height = height, inner_width = inner_width,\n\t\t show = show, parent = parent, before = before, resizable = resizable, reorderable = reorderable, hideable = hideable,\n\t\t sortable = sortable, context_menu_in_body = context_menu_in_body, row_background = row_background,\n\t\t borders_innerH = borders_innerH, borders_outerH = borders_outerH, borders_innerV = borders_innerV,\n\t\t borders_outerV = borders_outerV, policy = policy, no_host_extendX = no_host_extendX,\n\t\t no_host_extendY = no_host_extendY, no_keep_columns_visible = no_keep_columns_visible, precise_widths = precise_widths,\n\t\t no_clip = no_clip, pad_outerX = pad_outerX, no_pad_outerX = no_pad_outerX, no_pad_innerX = no_pad_innerX,\n\t\t scrollX = scrollX, scrollY = scrollY, id=id, indent=indent, callback=callback, sort_multi=sort_multi,\n sort_tristate=sort_tristate, pos=pos)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()",
"def refresh_table(self):\n self._table['bounty_column'] = Driver.instance.find_elements(*self._selectors['bounty_column'])\n self._table['first_name_column'] = Driver.instance.find_elements(*self._selectors['first_name_column'])\n self._table['last_name_column'] = Driver.instance.find_elements(*self._selectors['last_name_column'])\n self._table['edit_column'] = Driver.instance.find_elements(*self._selectors['edit_column'])\n self._table['details_column'] = Driver.instance.find_elements(*self._selectors['details_column'])\n self._table['delete_column'] = Driver.instance.find_elements(*self._selectors['delete_column'])",
"def initUI(self) -> None:\n ratio = 70\n width_to_set = (ratio * self.get_current_window_info()[0]) / 100.0\n height_to_set = (ratio * self.get_current_window_info()[1]) / 100.0\n self.setGeometry(200, 100, width_to_set, height_to_set)\n self.createTable()\n # Add box layout, add table to box layout and add box layout to widget\n self.layout = QVBoxLayout()\n self.layout.addWidget(self.tableWidget)\n self.setLayout(self.layout)\n self.setWindowTitle('View files')\n self.show()",
"def load(self):\n # Frame\n self.frame.grid_configure(row=1, column=1, padx=(PAD, PAD+TINY_PAD), pady=(0, PAD+CANVAS_PAD), sticky=tk.N+tk.S)\n self.frame.rowconfigure(1, weight=1)\n self.frame.rowconfigure(3, weight=1)\n # Across label\n self.across_label.config(text=\"Across\", anchor=tk.W, **settings.get(\"style:clue\"))\n self.across_label.grid(row=0, column=0, pady=(0, TINY_PAD), sticky=tk.N+tk.W)\n # Across frame\n self.across_frame.config(highlightthickness=1, highlightbackground=settings.get(\"style:border:fill\"))\n self.across_frame.grid(row=1, pady=(CANVAS_PAD, PAD), sticky=tk.N+tk.S)\n self.across_frame.rowconfigure(0, weight=1)\n # Across listbox\n self.across_listbox.config(bd=0, selectborderwidth=0, activestyle=tk.NONE, **settings.get(\"style:list\"))\n self.across_listbox.grid(row=0, column=0, sticky=tk.N+tk.S)\n self.across_listbox.config(yscrollcommand=self.across_scrollbar.set)\n # Across scrollbar\n self.across_scrollbar.config(command=self.across_listbox.yview)\n self.across_scrollbar.grid(row=0, column=1, sticky=tk.N+tk.S)\n # Down label\n self.down_label.config(text=\"Down\", anchor=tk.W, **settings.get(\"style:clue\"))\n self.down_label.grid(row=2, column=0, pady=(PAD, 0), sticky=tk.N+tk.W)\n # Down frame\n self.down_frame.config(highlightthickness=1, highlightbackground=settings.get(\"style:border:fill\"))\n self.down_frame.grid(row=3, pady=(TINY_PAD, 0), sticky=tk.N+tk.S)\n self.down_frame.rowconfigure(0, weight=1)\n # Down listbox\n self.down_listbox.config(bd=0, selectborderwidth=0, activestyle=tk.NONE, **settings.get(\"style:list\"))\n self.down_listbox.grid(row=0, column=0, sticky=tk.N+tk.S)\n self.down_listbox.config(yscrollcommand=self.down_scrollbar.set)\n # Down scrollbar\n self.down_scrollbar.config(command=self.down_listbox.yview)\n self.down_scrollbar.grid(row=0, column=1, sticky=tk.N+tk.S)",
"def widgets(self):\n start, y = 20, 40\n types_ = self.active_table.get_column_types()\n columns = self.active_table.get_column_names()\n for i in range(len(types_)):\n new_label = Label(self.master, text=f\"{columns[i]}(type={types_[i].__name__})\")\n new_label.place(x=start, y=y - 20, width=120, height=20)\n new_entry = Entry(self.master)\n new_entry.place(x=start, y=y, width=120, height=20)\n start += 130\n self.enter_values.append(new_entry)\n self.button_accept.config(text=\"Ok\", width=12, height=2,\n bg='#453d49',\n fg='#ffffff',\n relief='sunken',\n activebackground='#4f2b64',\n activeforeground='#ffffff',\n command=self.add)\n self.button_accept.place(x=300, y=100)\n self.button_cancel.config(text=\"Cancel\", width=12, height=2,\n bg='#453d49',\n fg='#ffffff',\n relief='sunken',\n activebackground='#4f2b64',\n activeforeground='#ffffff',\n command=self.master.withdraw)\n self.button_cancel.place(x=400, y=100)",
"def DoHeaderLayout(self):\r\n\r\n w, h = self.GetClientSize()\r\n has_header = self._agwStyle & TR_NO_HEADER == 0\r\n \r\n if self._header_win and has_header:\r\n self._header_win.SetDimensions(0, 0, w, self._headerHeight)\r\n self._header_win.Refresh()\r\n else:\r\n self._header_win.SetDimensions(0, 0, 0, 0)\r\n \r\n if self._main_win and has_header:\r\n self._main_win.SetDimensions(0, self._headerHeight + 1, w, h - self._headerHeight - 1)\r\n else:\r\n self._main_win.SetDimensions(0, 0, w, h)",
"def makeWidgets(self):\r\n self._frame = tk.Frame(self, relief=tk.RAISED, borderwidth=1)\r\n self._frame.pack(fill=tk.BOTH, expand=1)\r\n\r\n self.pack(fill=tk.BOTH, expand=1)\r\n\r\n self._frame._label1 = tk.Label(self._frame, text='----File Name----')\r\n self._frame._label1.pack(fill=tk.X, expand=tk.NO, pady=1, padx=2)\r\n self._frame._entry = tk.Entry(self._frame)\r\n self._frame._entry.pack(pady=2, padx=2)\r\n\r\n self._frame._label0 = tk.Label(self._frame, textvariable=self.timestr)\r\n self._setTime(self._elapsedtime)\r\n self._frame._label0.pack(fill=tk.X, expand=tk.NO, pady=3, padx=2)\r\n\r\n self._frame._label2 = tk.Label(self._frame, text='----Laps----')\r\n self._frame._label2.pack(fill=tk.X, expand=tk.NO, pady=4, padx=2)\r\n\r\n self._frame._scrollbar = tk.Scrollbar(self._frame, orient=tk.VERTICAL)\r\n self._frame._listbox = tk.Listbox(self._frame, selectmode=tk.EXTENDED, height=10,\r\n yscrollcommand=self._frame._scrollbar.set)\r\n self._frame._listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, pady=5, padx=2)\r\n self._frame._scrollbar.config(command=self._frame._listbox.yview)\r\n self._frame._scrollbar.pack(side=tk.RIGHT, fill=tk.Y)",
"def create_containers(self):\r\n self.container_widgets.update({\r\n \"main_frame\": tk.Frame(master=self)})\r\n self.container_widgets.update({\r\n \"panel_frame\": tk.Frame(master=self.container_widgets[\"main_frame\"]),\r\n \"order_frame\": tk.Frame(master=self.container_widgets[\"main_frame\"],\r\n width=const.ORDER_FRAME_SIZE[\"width\"],\r\n height=const.ORDER_FRAME_SIZE[\"height\"])})\r\n self.container_widgets.update({\r\n \"order_canvas\": tk.Canvas(master=self.container_widgets[\"order_frame\"])})\r\n self.container_widgets.update({\r\n \"orders_container\": tk.Frame(master=self.container_widgets[\"order_canvas\"]),\r\n \"orders_scrollbar\": tk.Scrollbar(master=self.container_widgets[\"order_frame\"],\r\n orient=\"vertical\",\r\n command=self.container_widgets[\"order_canvas\"].yview)\r\n })",
"def build(self):\n with self.set_master(sticky=\"nsew\", row_weights=[1], column_weights=[0, 1], auto_columns=0):\n self.build_category_canvas()\n with self.set_master(sticky=\"nsew\", row_weights=[0, 1, 0], column_weights=[1, 1]):\n self.build_previous_range_button(row=0, column=0)\n self.build_hidden_fields_checkbutton(row=0, column=1)\n with self.set_master(sticky=\"nsew\", row=1, column=0, row_weights=[1], column_weights=[1]):\n self.build_entry_frame()\n with self.set_master(sticky=\"nsew\", row=1, column=1, row_weights=[1], column_weights=[1]):\n self.build_field_frame()\n self.build_next_range_button(row=2, column=0)",
"def _populate(self, table):\n # Repopulating the listboxes will reset the scroll.\n self._curr_scroll_row = 0\n # Repopulating the listboxes will reset the selection, so make sure the entry defaults to disabled.\n self._disable_entry()\n self._clear()\n # Initialize the _last_table_idx to -1 because the listbox rows are zero-indexed and each table added to the\n # listboxes will increment _last_table_idx.\n self._last_table_idx = -1\n # Add a parent table row to the listboxes if the given table is not the root table.\n if table is not self.root_table:\n self._append_row(self.PARENT_DIR, self.BLANK, None)\n self._last_table_idx += 1\n # Iterate through the given table's subtables and add each one to the listboxes.\n for subtable in table.getSubTables():\n self._append_row(self.TABLE_FORMAT.format(subtable), self.BLANK, table.getSubTable(subtable))\n self._last_table_idx += 1\n # Iterate through the given table's entries and add each one to the listboxes.\n for key in table.getKeys():\n entry = table.getEntry(key)\n self._append_row(self.ENTRY_FORMAT.format(key), str(entry.value), entry)",
"def setupUi(self):\n self.setLayout(self.mainLayout)\n self.table.setModel(self.model)\n header = self.table.header()\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)\n self.model.appendRow(self.jobRow)\n self.mainLayout.addWidget(self.table)\n self.buttonLayout.addWidget(self.downButton)\n self.buttonLayout.addWidget(self.deleteLayerButton)\n self.buttonLayout.addWidget(self.addLayerButton)\n self.buttonLayout.addWidget(self.upButton)\n self.mainLayout.addLayout(self.buttonLayout)\n self.table.expandAll()\n self.initLayers()"
] | [
"0.6681995",
"0.6490527",
"0.61884373",
"0.60915476",
"0.60003984",
"0.5993402",
"0.5982692",
"0.5957703",
"0.5950346",
"0.5943269",
"0.59133244",
"0.59017205",
"0.587218",
"0.5831047",
"0.5782067",
"0.57106423",
"0.56948686",
"0.56909436",
"0.56567526",
"0.56553906",
"0.56511813",
"0.5611901",
"0.5598179",
"0.55883515",
"0.5588059",
"0.5569942",
"0.5557404",
"0.55567676",
"0.55562234",
"0.55547124"
] | 0.74151075 | 0 |
Get the visible column range | def getVisibleCols(self, x1, x2):
start = self.getColPosition(x1)
end = self.getColPosition(x2) + 1
if end > self.cols:
end = self.cols
return start, end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_visible_cells(self):\r\n ux, uy = self.GetScrollPixelsPerUnit()\r\n sx, sy = self.GetViewStart()\r\n w, h = self.GetGridWindow().GetClientSize().Get()\r\n sx *= ux\r\n sy *= uy\r\n start_col = self.XToCol(sx)\r\n start_row = self.YToRow(sy)\r\n end_col = self.XToCol(sx + w, True)\r\n end_row = self.YToRow(sy + h, True)\r\n return start_row, end_row, start_col, end_col",
"def get_visible_rows(self):\r\n start, end, _, _ = self.get_visible_cells()\r\n return start, end",
"def _getColormapRange(self):\n item = self.item()\n if item is not None and self._colormap is not None:\n return self._colormap.getColormapRange(item)\n else:\n return 1, 100 # Fallback",
"def visible(self):\r\n return self.column.visible",
"def plot_visible_cells(self):\n return self.container['plot_visible_cells']",
"def get_range(df, col):\n return df[col].min(), df[col].max()",
"def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row",
"def _get_row_col_neighbors(self, row_col: int) -> range:\n # inclusive start\n start = max(row_col - 1, 0)\n # exclusive limit\n limit = min(row_col + 2, len(self.puzzle))\n r = range(start, limit)\n return r",
"def first_visible_column(self):\n return self.container['first_visible_column']",
"def __estimator_column_chooser(self):\r\n column_set_low = self.__longitude_group - 120\r\n self.__estimator_inputs = self.__estimator_inputs[self.__estimator_inputs['long_sector'] > column_set_low]\r\n column_set_high = self.__longitude_group + 120\r\n self.__estimator_inputs = self.__estimator_inputs[self.__estimator_inputs['long_sector'] < column_set_high]\r\n return self.__estimator_inputs",
"def get_cols(self) :\n\n return list(self.cols)[1:]",
"def extent(self):\n return self.index.max() - self.index.min(), self.columns.max() - self.columns.min()",
"def _get_views(work_sheet):\n ddi_view_col = work_sheet.row_values(0).index('DDI View')\n view_col = list(set(work_sheet.col_values(ddi_view_col)[1:]))\n return view_col",
"def get_columns_range(data_wide: pd.DataFrame, target_col: str, win_len: int) -> List[str]:\n\n # Select range of columns based on `win_len`\n target_idx = data_wide.columns.get_loc(target_col)\n cols_rng = np.arange(target_idx - win_len, target_idx)\n cols_rng = cols_rng[cols_rng >= 0]\n return data_wide.columns[cols_rng].to_list()",
"def range_(headers, data):\n\tcolumn_matrix=data.get_data(headers).getT() # get columns as rows, as this makes analysis much easier by just perfoming operations on column list directly\n\tif column_matrix==[]:\n\t\tprint \"wrong headers, not present in data Object\"\n\t\treturn []\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\tfinal=np.concatenate((column_min, column_max), axis=1)\n\t\n\trng=final.tolist()\n\treturn rng",
"def test_002_range_columns(self):\n assert(len(\n self.range_transformer.fit_transform(\n self.data[self.range_col]\n ).columns\n ) == 1)",
"def cells(self):\n return chain.from_iterable(self.cols)",
"def cols(self):\n return self.col",
"def get_all_hidden_columns(self):\n visible_columns_list = []\n column_headers = self.driver.find_elements_by_xpath('//thead/tr/th')\n for i in range(len(column_headers)):\n if column_headers[i].get_attribute('class') == 'ng-scope ng-hide':\n visible_columns_list.append(i + 1)\n return visible_columns_list",
"def bounds(self):\n \n return self.osmdb.bounds()",
"def range_(self):\n return self.bset.range_",
"def get_columns(self) -> list or None:\n if self.connected and self.settings.excelSheetName:\n cursor = self.workbook.cursor()\n if cursor:\n columns = []\n for row in cursor.columns(self.settings.excelSheetName):\n columns.append(row['column_name'])\n cursor.close()\n columns.reverse()\n return columns\n return None",
"def range(self):\n return self.range_array",
"def columns(self):\n \n pass",
"def columnspace(self):\n if not self.domain.is_Field:\n raise DMNotAField('Not a field')\n rref, pivots = self.rref()\n rows, cols = self.shape\n return self.extract(range(rows), pivots)",
"def displayed_lines(self) -> list[int]:\n return sorted(row for row, col in self.visible_line_to_row_col.values())",
"def GetColStarts(self):\n return _hypre.HypreParMatrix_GetColStarts(self)",
"def col(self, col: tuple) -> list:\n return self.grid[col::9]",
"def collect_columns():\n return ((x, y) for x in range(72) for y in range(x + 9, 81, 9))",
"def get_col_inds(col_range):\n if '-' in col_range:\n beg_seat = col_range.split('-')[0]\n end_seat = col_range.split('-')[1]\n if beg_seat[0] != end_seat[0]:\n raise Exception(\"Non matching row char {} and {}\".format(beg_seat[0], end_seat[0]))\n\n beg = int(beg_seat[1:])\n end = int(end_seat[1:])\n return range(beg, end+1)\n elif str.isdigit(col_range[1:]):\n return range(int(col_range[1:]), int(col_range[1:])+1)\n else:\n raise Exception(\"Unknown col range format: {}\".format(cur_col_range))"
] | [
"0.707298",
"0.6902809",
"0.6700858",
"0.6595008",
"0.650634",
"0.6288859",
"0.6270333",
"0.6256797",
"0.6243233",
"0.6127051",
"0.60789394",
"0.60119057",
"0.59667975",
"0.5964401",
"0.5956471",
"0.5939211",
"0.59348154",
"0.5904628",
"0.58691394",
"0.5851135",
"0.58139575",
"0.57939315",
"0.5790426",
"0.57819897",
"0.5776553",
"0.5774286",
"0.577317",
"0.5753674",
"0.57294375",
"0.56887746"
] | 0.72647125 | 0 |
Set the xview of table and col header | def set_xviews(self, *args):
self.xview(*args)
self.tablecolheader.xview(*args)
self.redrawVisible()
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_headers(self,executer, tree, cursor, table, columns_size):\n\n # Getting headers\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = headers\n\n # Setting width to all column headers basing on columns amount.\n set_width = int(self.column_length_configurator/len(headers))\n\n\n # Setting columns width and headers\n for column in headers:\n tree.column(column, width=set_width,minwidth=self.min_width)\n tree.heading(column, text=column)",
"def customize_headers(self,executer, tree, cursor, table,custom_headers):\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = custom_headers\n\n\n set_width = int(self.column_length_configurator / len(headers))\n\n # Setting columns width and headers\n for column in custom_headers:\n tree.column(column, width=set_width, minwidth=self.min_width)\n tree.heading(column, text=column)",
"def columnTitles(self):\n \n pass",
"def columnTitles(self):\n \n pass",
"def __set_column_width(self):\n for i in range(0, len(self.header_width)):\n self.view.setColumnWidth(i, self.header_width[i])",
"def Show_Headers( self ):\r\n self.system.Change_Seq( \"Header\" )",
"def SetHeader(self, window):\n window.SetName(\"header\")\n window.SetBackgroundColour(wx.GetApp().settings.header_bg_color)\n window.SetForegroundColour(wx.GetApp().settings.header_fg_color)\n window.SetFont(wx.GetApp().settings.header_text_font)",
"def setup_tableview(self):\n\n # Reset the widget\n self.table_view.clear()\n\n self.table_view.setRowCount(len(self.sorted_keys))\n self.table_view.setColumnCount(8)\n\n # Set the horizontal headers' text and column width\n self.table_view.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem('Title'))\n self.table_view.setColumnWidth(0, 150)\n\n self.table_view.setHorizontalHeaderItem(1, QtWidgets.QTableWidgetItem('Type'))\n self.table_view.setColumnWidth(1, 65)\n\n self.table_view.setHorizontalHeaderItem(2, QtWidgets.QTableWidgetItem('Score'))\n self.table_view.setColumnWidth(2, 60)\n\n self.table_view.setHorizontalHeaderItem(3, QtWidgets.QTableWidgetItem('Genre'))\n self.table_view.setColumnWidth(3, 150)\n\n self.table_view.setHorizontalHeaderItem(4, QtWidgets.QTableWidgetItem('Duration'))\n self.table_view.setColumnWidth(4, 300)\n\n self.table_view.setHorizontalHeaderItem(5, QtWidgets.QTableWidgetItem('Release Date'))\n self.table_view.setColumnWidth(5, 150)\n\n self.table_view.setHorizontalHeaderItem(6, QtWidgets.QTableWidgetItem('Credits'))\n self.table_view.setColumnWidth(6, 350)\n\n self.table_view.setHorizontalHeaderItem(7, QtWidgets.QTableWidgetItem('Summary'))\n self.table_view.setColumnWidth(7, 350)\n\n '''\n self.data_dict[title] = {\n 'score': score, → 7.7\n 'summary': summary, → 'Some string'\n 'duration': duration, → '100 episodes (7 Seasons in 2020), 43min per Episode' / '1h 55min'\n 'credits': creds_list, → ['Creators: some dude', 'Stars: hero, his chick, evil dude']\n 'genres': genres, → ['Drama', 'Fantasy', 'Comedy']\n 'released': year, → 2016\n 'type': show_type, → 'Movie' / 'Serie'\n }\n '''\n\n for i, title in enumerate(self.sorted_keys):\n\n # Adjust certain keys for better displaying\n title_genres = ', '.join(self.data_dict[title]['genres'])\n title_credits = '\\n'.join(self.data_dict[title]['credits'])\n title_score = str(self.data_dict[title]['score']) + '/10'\n\n # Set row height for each row depending on the amount of credits\n # (Producers:, Writers:, Stars: // Producers:, Stars:)\n self.table_view.setRowHeight(i, len(self.data_dict[title]['credits']) * 25)\n\n # Add column data for each row\n self.table_view.setItem(i, 0, QtWidgets.QTableWidgetItem(title))\n self.table_view.setItem(i, 1, QtWidgets.QTableWidgetItem(self.data_dict[title]['type']))\n self.table_view.setItem(i, 2, QtWidgets.QTableWidgetItem(title_score))\n self.table_view.setItem(i, 3, QtWidgets.QTableWidgetItem(title_genres))\n self.table_view.setItem(i, 4, QtWidgets.QTableWidgetItem(self.data_dict[title]['duration']))\n self.table_view.setItem(i, 5, QtWidgets.QTableWidgetItem(self.data_dict[title]['released']))\n self.table_view.setItem(i, 6, QtWidgets.QTableWidgetItem(title_credits))\n self.table_view.setItem(i, 7, QtWidgets.QTableWidgetItem(self.data_dict[title]['summary']))",
"def _horizontal_header(self):\n return self.header()",
"def _horizontal_header(self):\n return self.header()",
"def initTableView(self):\n #productive\n profprint()\n if self.table==None:\n # self.keys = (\"#\")\n # self.keys = (\"#\",\"Round\" ,\"Reliability\")\n self.keys = (\"#\")\n self.labelStats = {}\n self.labelStats['Labels'] = []\n self.items = []\n if self.model==None:\n self.model = qt.QStandardItemModel()\n self.model.setColumnCount(5)\n self.model.setHeaderData(0,1,\"\")\n self.model.setHeaderData(1,1,\"#\")\n # self.model.setHeaderData(2,1,\"R.\")\n # self.model.setHeaderData(3,1,\"Reliability\")\n self.model.setHeaderData(2,1,\"Display\")\n self.model.setHeaderData(3,1,\"Reformat\")\n self.model.setHeaderData(4,1,\"Comments\")\n # self.model.setStrechLastSection(True)\n if self.view == None:\n self.view = qt.QTableView()\n self.view.setMinimumHeight(300)\n self.view.sortingEnabled = True\n self.view.verticalHeader().visible = False\n self.view.horizontalHeader().setStretchLastSection(True)\n\n # col = 1\n # for k in self.keys:\n # # self.view.setColumnWidth(col,15*len(k))\n # # self.model.setHeaderData(col,1,k)\n # col += 1 \n self.view.setModel(self.model)\n self.view.setColumnWidth(0,18)\n self.view.setColumnWidth(1,58)\n self.view.setColumnWidth(2,58)\n self.table = 1\n self.row=0\n self.col=0\n slicer.modules.NeedleFinderWidget.analysisGroupBoxLayout.addRow(self.view)",
"def initTableView(self):\r\n # productive\r\n profprint()\r\n if self.table == None:\r\n # self.keys = (\"#\")\r\n # self.keys = (\"#\",\"Round\" ,\"Reliability\")\r\n self.keys = (\"#\")\r\n self.labelStats = {}\r\n self.labelStats['Labels'] = []\r\n self.items = []\r\n if self.model == None:\r\n self.model = qt.QStandardItemModel()\r\n self.model.setColumnCount(5)\r\n self.model.setHeaderData(0, 1, \"\")\r\n self.model.setHeaderData(1, 1, \"#\")\r\n # self.model.setHeaderData(2,1,\"R.\")\r\n # self.model.setHeaderData(3,1,\"Reliability\")\r\n self.model.setHeaderData(2, 1, \"Display\")\r\n self.model.setHeaderData(3, 1, \"Reformat\")\r\n self.model.setHeaderData(4, 1, \"Comments\")\r\n # self.model.setStrechLastSection(True)\r\n if self.view == None:\r\n self.view = qt.QTableView()\r\n self.view.setMinimumHeight(300)\r\n self.view.sortingEnabled = True\r\n self.view.verticalHeader().visible = False\r\n self.view.horizontalHeader().setStretchLastSection(True)\r\n\r\n # col = 1\r\n # for k in self.keys:\r\n # # self.view.setColumnWidth(col,15*len(k))\r\n # # self.model.setHeaderData(col,1,k)\r\n # col += 1\r\n self.view.setModel(self.model)\r\n self.view.setColumnWidth(0, 18)\r\n self.view.setColumnWidth(1, 58)\r\n self.view.setColumnWidth(2, 58)\r\n self.table = 1\r\n self.row = 0\r\n self.col = 0\r\n slicer.modules.NeedleFinderWidget.analysisGroupBoxLayout.addRow(self.view)",
"def setFieldNames(self, model, lyr): \n #get the fields\n fields = lyr.pendingFields()\n position = 0\n \n #set column names\n for field in fields:\n model.setHorizontalHeaderItem(position, QStandardItem(field.name()))\n position+=1",
"def set_yviews(self, *args):\n self.yview(*args)\n self.tablerowheader.yview(*args)\n self.redrawVisible()\n return",
"def horizontal_headers(self, horizontal_headers):\n if horizontal_headers is None:\n self._horizontalHeaders = horizontal_headers\n self.tableWidget.horizontalHeader().setVisible(False)\n return\n\n\n self._horizontalHeaders = horizontal_headers\n\n self.tableWidget.setColumnCount(len(horizontal_headers))\n self.tableWidget.horizontalHeader().setVisible(True)\n\n for idx, header in enumerate(horizontal_headers):\n item = QTableWidgetItem()\n item.setText(header)\n self.tableWidget.setHorizontalHeaderItem(idx, item)",
"def header(self):\n ...",
"def _html_table_headers(self, row_axes, col_axes):\n dsh = self.get_dshape()\n nb_blank_cols = len(row_axes) * 2 # nb of blank cols preprended to\n # each line of the column header\n nb_rows = int(np.prod([dsh[a] for a in row_axes]))\n nb_cols = int(np.prod([dsh[a] for a in col_axes]))\n # col header\n if nb_blank_cols > 0:\n blank_cells = ['']\n blank_cells_attrs = [{'colspan': str(nb_blank_cols)}]\n else:\n blank_cells = []\n blank_cells_attrs = []\n col_header = []\n nb_repets = 1\n span = nb_cols\n for a in col_axes:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n # row showing the axis label\n col_header.append(html_list_to_row(blank_cells + [a], 'h',\n blank_cells_attrs +\n [{'colspan': nb_cols}]))\n # row showing domain values\n col_header.append(html_list_to_row(blank_cells + dom * nb_repets, 'h',\n blank_cells_attrs +\n [{'colspan': str(span)}] *\n len(dom) * nb_repets))\n nb_repets *= len(dom)\n\n # row header\n # initialization of all rows because row filling wont be sequential:\n row_header = [[] for i in range(nb_rows)]\n nb_repets = 1\n span = nb_rows\n for a in row_axes:\n # 1st row contains all axis labels:\n row_header[0].append(html_cell(html_div(a, {'class': 'rotate'}), 'h',\n {'rowspan': nb_rows}))\n\n # dispatch domain values across corresponding rows:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n for idv, dv in enumerate(dom * nb_repets):\n row_header[\n idv * span].append(html_cell(dv, 'h', {'rowspan': span}))\n\n nb_repets *= len(dom)\n\n return [''.join(r) for r in row_header], col_header",
"def formatTable(self):\n # self.table.horizontalHeader().setResizeMode(QHeaderView.ResizeToContents)\n self.populateWithComplexLabels()\n\n # self.table.resizeColumnsToContents()\n self.table.horizontalHeader().setStyleSheet(self.headerStyleSheet)\n self.table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\n self.combo_mesi.setCurrentIndex(self.indexMonth)",
"def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header",
"def OnPaint(self, event):\r\n \r\n if self._buffered:\r\n dc = wx.BufferedPaintDC(self)\r\n else:\r\n dc = wx.PaintDC(self)\r\n \r\n self.PrepareDC(dc)\r\n self.AdjustDC(dc)\r\n\r\n x = 0\r\n\r\n # width and height of the entire header window\r\n w, h = self.GetClientSize()\r\n w, dummy = self._owner.CalcUnscrolledPosition(w, 0)\r\n dc.SetBackgroundMode(wx.TRANSPARENT)\r\n\r\n numColumns = self.GetColumnCount()\r\n \r\n for i in xrange(numColumns):\r\n\r\n if x >= w:\r\n break\r\n \r\n if not self.IsColumnShown(i):\r\n continue # do next column if not shown\r\n\r\n params = wx.HeaderButtonParams()\r\n\r\n column = self.GetColumn(i)\r\n params.m_labelColour = column.GetColour()\r\n params.m_labelFont = column.GetFont()\r\n\r\n wCol = column.GetWidth()\r\n flags = 0\r\n rect = wx.Rect(x, 0, wCol, h)\r\n x += wCol\r\n\r\n if i == self._hotTrackCol:\r\n flags |= wx.CONTROL_CURRENT\r\n \r\n params.m_labelText = column.GetText()\r\n params.m_labelAlignment = column.GetAlignment()\r\n\r\n image = column.GetImage()\r\n imageList = self._owner.GetImageList()\r\n\r\n if image != -1 and imageList:\r\n params.m_labelBitmap = imageList.GetBitmap(image)\r\n\r\n if self._headerCustomRenderer != None:\r\n self._headerCustomRenderer.DrawHeaderButton(dc, rect, flags, params)\r\n else:\r\n wx.RendererNative.Get().DrawHeaderButton(self, dc, rect, flags,\r\n wx.HDR_SORT_ICON_NONE, params)\r\n \r\n # Fill up any unused space to the right of the columns\r\n if x < w:\r\n rect = wx.Rect(x, 0, w-x, h)\r\n if self._headerCustomRenderer != None:\r\n self._headerCustomRenderer.DrawHeaderButton(dc, rect)\r\n else:\r\n wx.RendererNative.Get().DrawHeaderButton(self, dc, rect)",
"def design_header(self):\n pass",
"def header(self):\n self.set_font(self.police, 'B', 15)\n self.cell(w=0, h=10, txt=f\"CV de {self.name}\", border=1, ln=1, align='C')",
"def SetHeaderFont(self, font):\r\n\r\n if not self._header_win:\r\n return\r\n \r\n for column in xrange(self.GetColumnCount()):\r\n self._header_win.SetColumn(column, self.GetColumn(column).SetFont(font))\r\n\r\n self._header_win.Refresh()",
"def _horizontal_header(self):\n return self.horizontalHeader()",
"def assign_header(self, btn):\n with open(self.filename, 'rU+') as f:\n df = pd.read_csv(f, sep=self.delim, index_col=False)\n btn.color = [.3, .9, .5, 1]\n non_numeric_label = self.non_numeric_axis\n buttons = self.headerButtons.children[:]\n for x in buttons:\n if x != btn:\n x.color = [0, 0, 0, 1]\n if self.cur_axis == 'x':\n self.x_axis = btn.text.encode('ascii')\n# print df[self.x_axis].dtype\n if df[self.x_axis].dtype == 'object':\n non_numeric_label.text = 'Note: This is a non-numeric data column.'\n self.ids.scatter_button.disabled = True\n self.ids.disabled_explanation.text = self.scatter_disabled_explanation\n self.non_numeric_x_axis = True\n else:\n non_numeric_label.text = ''\n self.ids.scatter_button.disabled = False\n self.ids.disabled_explanation.text = ''\n self.non_numeric_x_axis = False\n self.ids.sm.current = 'screenX'\n elif self.cur_axis == 'y':\n self.y_axis = btn.text.encode('ascii')\n #print self.y_axis\n self.ids.sm.current = 'screenY'",
"def SetColumn(self, column, colInfo):\r\n\r\n self._header_win.SetColumn(column, colInfo)\r\n self._header_win.Refresh()",
"def setupModelView(self):\n self.model = QStandardItemModel()\n\n table_view = QTableView()\n # From QAbstractItemView.ExtendedSelection = 3\n table_view.SelectionMode(3)\n table_view.setModel(self.model)\n\n # Set initial row and column values\n self.model.setRowCount(3)\n self.model.setColumnCount(4)\n\n self.loadCSVFile()\n\n v_box = QVBoxLayout()\n v_box.addWidget(table_view)\n\n self.setLayout(v_box)",
"def header(self) -> NoReturn:\n self.set_x(self.t_margin + self.b_margin)\n self.ln(self.line_height)",
"def init_widget(self):\n super(QtViewTable, self).init_widget()\n d = self.declaration\n self.set_table_model(d.table_model)\n self.set_orientation(d.orientation)",
"def formatHeaderNames(self):\n listaNomiGiorniSettimana = ['Lun',\n 'Mar',\n 'Mer',\n 'Gio',\n 'Ven',\n 'Sab',\n 'Dom']\n\n for colonna, giorno in enumerate(listaNomiGiorniSettimana):\n item = QTableWidgetItem()\n item.setText(giorno)\n if colonna > 4:\n brush = QBrush(Qt.red)\n item.setForeground(brush)\n self.table.setHorizontalHeaderItem(colonna, item)\n\n # self.table.setHorizontalHeaderLabels(listaGiorniSettimana)"
] | [
"0.6379965",
"0.62530154",
"0.6203541",
"0.6203541",
"0.6136738",
"0.60498506",
"0.59871495",
"0.59727997",
"0.5825335",
"0.5825335",
"0.5818333",
"0.5799224",
"0.578857",
"0.5781992",
"0.5777474",
"0.57380813",
"0.57033813",
"0.5697453",
"0.56715167",
"0.56708723",
"0.5666349",
"0.5663356",
"0.5646603",
"0.56302214",
"0.56260616",
"0.5586066",
"0.55708146",
"0.55496573",
"0.5533679",
"0.5523398"
] | 0.79994017 | 0 |
Set function to be executed every time this window is toggled hidden/shown. | def set_on_toggle_hidden(self, on_toggle_hidden):
self.on_toggle_hidden = on_toggle_hidden | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ToggleVisible(self, event):\n pass",
"def toggle(self):",
"def toggle(self) -> None:\n ...",
"def toggleWindowVisibility(*args, **kwargs)->None:\n pass",
"def toggle(self) -> None:",
"def toggle(self) -> None:",
"def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')",
"def _on_toggle_and_run(self, kwargs: dict) -> None:\n self.toggle(state=kwargs[CONF_STATE])\n\n if kwargs[CONF_STATE] == \"on\":\n state = \"off\"\n else:\n state = \"on\"\n\n self.handles[HANDLE_VACATION_MODE] = self.run_in(\n self._on_toggle_and_run, randint(5 * 60, 60 * 60), state=state\n )",
"def ev_windowshown(self, event: WindowEvent) -> None:",
"def window_info_toggle():\n window_info.hide() if window_info.showing else window_info.show()",
"def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()",
"def toggled(self, *args, **kwargs): # real signature unknown\n pass",
"def toggle_window_visibility(self):\r\n if self.isHidden():\r\n self.show_window()\r\n self.visibilityAction.setText(self.hideWindowString)\r\n else:\r\n self.hide_window()\r\n self.visibilityAction.setText(self.showWindowString)",
"def toggle_hidden(self):\n AbstractChild.toggle_hidden(self)\n self.accFrame.update_values()\n self.botFrame.update_values()\n # On toggle hidden\n self.on_toggle_hidden()",
"def on(self):\n if self._hidden:\n self.off()\n return\n if self._state != True:\n self.log_state_change('+')\n self._state = True",
"def toggle_call(self) -> None:",
"def toggle(self, **kwargs):\n self.on = False if self.on else True",
"def _add_function_to_labels_toggles(self, fun):\n for s_group in self.labels_toggles:\n for w in s_group:\n w.on_trait_change(fun, 'value')",
"def _on_start_cycle(self, kwargs: dict) -> None:\n self.handles[HANDLE_TOGGLE_IN_WINDOW] = self.run_every(\n self._on_schedule_toggle,\n self.datetime(),\n self.properties[CONF_WINDOW],\n state=self.properties[CONF_STATE],\n )\n self.handles[HANDLE_TOGGLE_OUT_WINDOW] = self.run_every(\n self._on_schedule_toggle,\n self.datetime() + timedelta(seconds=self.properties[CONF_DURATION]),\n self.properties[CONF_WINDOW],\n state=self.properties[CONF_STATE],\n opposite=True,\n )",
"def set_func(self, function):\n self.get(COMMAND_UIC, 'SetFunc', [('function', function)])",
"def toggle(self):\r\n self._variable.set(not self._variable.get()) \r\n self._activate()",
"def __on_click(self):\n if self.enable:\n self.__function_to_activate()",
"def toggle(self):\n self.open = not self.open",
"def ev_windowhidden(self, event: WindowEvent) -> None:",
"def _on_schedule_toggle(self, kwargs: dict) -> None:\n if kwargs.get(\"opposite\"):\n self.toggle(opposite_of=kwargs[\"state\"])\n else:\n self.toggle(state=kwargs[\"state\"])",
"def event_afterhide(self):\n logging.warning('afterhide undefined')",
"def SetToggle(self, flag):\n\n self.up = not flag\n self.Refresh()",
"def visibility_toggle(self, _):\n raise VisibilityToggleEvent",
"def toggle(self):\n self._variable.set(not self._variable.get())\n self._activate()",
"def friewallOn():\n pass"
] | [
"0.6417303",
"0.60684586",
"0.6023648",
"0.6016456",
"0.60106647",
"0.60106647",
"0.6010533",
"0.5866378",
"0.5774059",
"0.5747065",
"0.57233655",
"0.5721329",
"0.5703353",
"0.56998634",
"0.56814325",
"0.5621253",
"0.56201285",
"0.5618225",
"0.556665",
"0.55503136",
"0.55201834",
"0.55027896",
"0.5495896",
"0.5445723",
"0.5431617",
"0.5431366",
"0.5417698",
"0.5407491",
"0.54009557",
"0.5394936"
] | 0.65579814 | 0 |
Overriding update method to also update login window. | def update(self):
if not self.login is None:
if self.login.isAlive:
self.login.update()
else: # Login window was just closed
self.accFrame.update_values()
self.login = None
AbstractChild.update(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evt_login(self, event):\n if self.pair_correct(self.wgt_txt_login_user.GetValue(), self.wgt_txt_login_pass.GetValue()):\n self.parent.Hide()\n self.pane_landing.Show()\n self.parent.parent.SetSizer(self.szr_landing)\n self.parent.parent.Layout()\n else:\n if self.invalid_text == None:\n self.invalid_text = wx.StaticText(self, size=(60, -1), label=\"INVALID USER/PASSKEY PAIR\", style=wx.ALIGN_CENTER)\n self.invalid_text.SetBackgroundColour('red')\n self.szr_login_inner.Add(self.invalid_text, flag=wx.EXPAND)\n self.szr_login_inner.AddSpacer(self.temp_space)\n self.Fit()\n else:\n self.invalid_text.SetLabel(\"C'mon, I said it's not a bloody valid passkey\")\n self.invalid_count += 1\n self.Layout()",
"def updateWidget(self):\n\n if self.frame1.state() == 'normal':\n self.frame2.deiconify()\n self.frame1.withdraw()\n else:\n self.frame2.withdraw()\n self.frame2.update()\n self.frame2.deiconify()\n self.frame1.title(\"%s's turn\" % self.usernames[1])\n self.frame2.title(\"%s's turn\" % self.usernames[0])\n showDialogBox(\"%s's turn first!\" % self.usernames[0])\n self.frame1.update()\n self.frame2.update()",
"def login(self):\n logging.debug(\"login called\")\n\n # Apply settings\n self.localisationsettings.apply_to_upcoming_session()\n self.admin_setting.apply_to_upcoming_session()\n self.macspoof_setting.apply_to_upcoming_session()\n self.network_setting.apply_to_upcoming_session()\n\n self.mainwindow.hide()\n self.gdmclient.do_login()",
"def LoadLogInWindow(self):\n \n def CreateAccount():\n \"\"\"Erase 'Log In' widgets to load 'Account Creation' widgets.\n \n This function is called by the 'Create New Account' button.\n \"\"\"\n login_frame.forget()\n self.LoadCreateAccountWindow()\n \n def ForgotPassword():\n \"\"\"Erase 'Log In' widgets to load 'Forgot Password' widgets.\n \n This function is called by clicking on the 'Forgot Password' label.\n \"\"\"\n login_frame.forget()\n self.LoadForgotPasswordWindow()\n \n def LogIn():\n \"\"\"Verify the user's credentials and load the main program.\n \n This function is called by the 'Log In' button.\n \"\"\"\n result, user, date = self.CheckLogin()\n if result:\n # Save the database file and load the program\n self.SaveData()\n self.main_frame.destroy()\n MainWindow.MainWindow(self, user, login_date=date)\n \n login_frame = Widgets.CreateFrame(self.main_canvas)\n self.main_canvas.itemconfigure(self.canvas_window, window=login_frame)\n \n # Create a button to log in and a button to create a new account\n button_frame = Widgets.CreateFrame(login_frame)\n button_frame.pack(side='bottom')\n login_button = Widgets.CreateButton(button_frame,\n _text='Log In',\n _cmd=LogIn,\n _height=self.button_height)\n login_button.pack(side='left')\n create_button = Widgets.CreateButton(button_frame,\n _text='Create\\nNew Account',\n _cmd=CreateAccount,\n _height=self.button_height)\n create_button.pack(side='right')\n \n # Create a clickable label to reset a user's password\n reset_label = Widgets.CreateLabel(login_frame,\n _text='Forgot Password?',\n _font=('georgia', 10))\n reset_label.pack(side='bottom')\n reset_label.bind('<Button-1>', lambda e:ForgotPassword())\n \n # Create the 'Password' and 'Username' fields\n widget_list = []\n pass_entry = Widgets.CreateEntry(login_frame, _var=self.password,\n _show='*')\n pass_entry.pack(side='bottom')\n widget_list.append(pass_entry)\n pass_label = Widgets.CreateLabel(login_frame, _text='Password:')\n pass_label.pack(side='bottom')\n \n user_entry = Widgets.CreateEntry(login_frame, _var=self.username)\n user_entry.pack(side='bottom')\n widget_list.append(user_entry)\n user_label = Widgets.CreateLabel(login_frame, _text='Username:')\n user_label.pack(side='bottom')\n \n # Entry fields are created bottom-to-top and their order in the window\n # stack needs to be reversed and lifted so that 'Tab' navigates from\n # top-to-bottom\n widget_list.reverse()\n for widget in widget_list:\n widget.lift()",
"def LoadForgotPasswordWindow(self):\n \n def ChangePassword():\n \"\"\"Call the function to change the user's password.\n \n This function is called by the 'Change Password' button.\n \"\"\"\n if self.ChangePassword():\n # Update successful, return to main screen\n self.confirm_pass.set('')\n self.password.set('')\n Return()\n else:\n return\n \n def Return():\n \"\"\"Erase 'Forgot Password' widgets to load 'Log In' widgets.\n \n This function is called by the 'Return To Log In' button.\n \"\"\"\n forgot_frame.forget()\n self.LoadLogInWindow()\n \n forgot_frame = Widgets.CreateFrame(self.main_canvas)\n self.main_canvas.itemconfigure(self.canvas_window, window=forgot_frame)\n \n # Create a button to change the password and a button to return to\n # the initial 'Log In' window\n button_frame = Widgets.CreateFrame(forgot_frame)\n button_frame.pack(side='bottom')\n \n change_button = Widgets.CreateButton(button_frame,\n _text='Change Password',\n _cmd=ChangePassword,\n _height=self.button_height)\n change_button.pack(side='left')\n return_button = Widgets.CreateButton(button_frame,\n _text='Return To\\nLog In',\n _cmd=Return,\n _height=self.button_height)\n return_button.pack(side='right')\n \n # Set the password fields to monitor if they match and update a label\n # to show their status\n self.pass_match_label = Widgets.CreateLabel(forgot_frame, _text='',\n _font=('georgia', 8))\n self.pass_match_label.pack(side='bottom')\n self.password.trace('w', self.PasswordMatch)\n self.confirm_pass.trace('w', self.PasswordMatch)\n self.PasswordMatch()\n \n # Create the 'Confirm Password', 'New Password', and 'Username' fields\n widget_list = []\n confirm_entry = Widgets.CreateEntry(forgot_frame,\n _var=self.confirm_pass,\n _show='*')\n confirm_entry.pack(side='bottom')\n widget_list.append(confirm_entry)\n confirm_label = Widgets.CreateLabel(forgot_frame,\n _text='Confirm Password:')\n confirm_label.pack(side='bottom')\n \n new_pass_entry = Widgets.CreateEntry(forgot_frame, _var=self.password,\n _show='*')\n new_pass_entry.pack(side='bottom')\n widget_list.append(new_pass_entry)\n new_pass_label = Widgets.CreateLabel(forgot_frame,\n _text='New Password:')\n new_pass_label.pack(side='bottom')\n \n user_entry = Widgets.CreateEntry(forgot_frame, _var=self.username)\n user_entry.pack(side='bottom')\n widget_list.append(user_entry)\n user_label = Widgets.CreateLabel(forgot_frame, _text='Username:')\n user_label.pack(side='bottom')\n \n # Entry fields are created bottom-to-top and their order in the window\n # stack needs to be reversed and lifted so that 'Tab' navigates from\n # top-to-bottom\n widget_list.reverse()\n for widget in widget_list:\n widget.lift()",
"def updateWidget(self):\n pass",
"def evt_login(self, event):\n\n # Hide current pane, show PaneMain, then reset the active sizer and call Layout()\n self.parent.Hide()\n self.pane_landing.Show()\n self.parent.parent.SetSizer(self.szr_landing)\n self.parent.parent.Layout()",
"def update(self):\n self.downloader.authorize()\n self.update_users()\n self.update_channels()\n self.update_history()",
"def log_in(self):\r\n self.clear_screen()\r\n lbl_log_in = Label(self.root, text=\"Welcome. Please log in to the system.\",\r\n font=self.title_font,\r\n bg=self.bg_color)\r\n lbl_log_in.pack(pady=5, padx=10)\r\n\r\n user_name = Label(self.root, text=\"enter user name\", font=self.text_font, bg=self.bg_color)\r\n user_name.pack(pady=5, padx=10)\r\n user_name_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25)\r\n user_name_entry.pack(pady=5, padx=10)\r\n\r\n password = Label(self.root, text=\"enter password\", font=self.text_font, bg=self.bg_color)\r\n password.pack(pady=5, padx=10)\r\n password_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show=\"*\")\r\n password_entry.pack(pady=5, padx=10)\r\n\r\n passcode = Label(self.root, text=\"enter passcode\", font=self.text_font, bg=self.bg_color)\r\n passcode.pack(pady=5, padx=10)\r\n passcode_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show=\"*\")\r\n passcode_entry.pack(pady=5, padx=10)\r\n\r\n button_enter_log = Button(self.root, text=\"log in\", command=lambda: self.submit_log_in(\r\n user_name_entry, password_entry, passcode_entry))\r\n button_enter_log.pack(pady=10)\r\n\r\n button_sign_in = Button(self.root, text=\"Don't have a user? Sign in\", command=self.sign_in)\r\n button_sign_in.pack(pady=10)",
"def loginFunc(self):\n username = (\n self.lineEdit.text()\n ) # Get the text from the username & password lineedit\n password = self.lineEdit_2.text() #\n # Check if password and username isnt empty, if it is, popup\n if DB.verify_login(username, password) \\\n and not DB.new_customer(username):\n self.customer.budget.set_budget(DB.get_income(self.customer.email),\n DB.get_variable_expenses(self.customer.email),\n DB.get_fixed_expenses(self.customer.email))\n self.customer.budget.set_buffert(DB.get_buffert(username))\n self.displayUi = MenuScreen()\n self.hide()\n self.displayUi.show()\n elif DB.verify_login(username, password) and DB.new_customer(username):\n self.displayUi = FirstLoginScreen()\n self.hide()\n self.displayUi.show()\n else:\n self.popUp.exec_()",
"def login(self):\n self.new_window = tk.Toplevel(self.acesso)\n Entrar(self.new_window, self.acesso)",
"def UpdateUI(self):\n # colors & font\n self.SetBackgroundColour(wx.GetApp().settings.bg_color)\n self.SetForegroundColour(wx.GetApp().settings.fg_color)\n self.SetFont(wx.GetApp().settings.text_font)\n\n # apply new (or not) 'wx' values to content.\n p = self.FindWindow(\"content\")\n if p is not None:\n p.SetBackgroundColour(wx.GetApp().settings.bg_color)\n p.SetForegroundColour(wx.GetApp().settings.fg_color)\n p.SetFont(wx.GetApp().settings.text_font)\n\n # apply new (or not) 'wx' values to header.\n p = self.FindWindow(\"header\")\n if p is not None:\n p.SetBackgroundColour(wx.GetApp().settings.header_bg_color)\n p.SetForegroundColour(wx.GetApp().settings.header_fg_color)\n p.SetFont(wx.GetApp().settings.header_text_font)\n\n # apply new (or not) 'wx' values to actions.\n p = self.FindWindow(\"actions\")\n if p is not None:\n p.SetBackgroundColour(wx.GetApp().settings.action_bg_color)\n p.SetForegroundColour(wx.GetApp().settings.action_fg_color)\n p.SetFont(wx.GetApp().settings.action_text_font)\n\n self.Refresh()",
"def OnFrameShow(self):\r\n\r\n\t\t# Update local copy of config\r\n\t\tself._configtmp.update(self._config)\r\n\r\n\t\t# Fix notebook background color when switching themes in XP\r\n\t\tself.notebookApp.SetBackgroundColour(\\\r\n\t\t\tself.notebookApp.GetThemeBackgroundColour())\r\n\r\n\t\t# Set flag\r\n\t\tself.toggleButtonRateFlag.SetValue(self._configtmp[\"flagimage\"])\r\n\r\n\t\t# Set ratings\r\n\t\tself._iconstars = [icons.getGrayStarBitmap(),\r\n\t\t\ticons.getYellowStarBitmap(),\r\n\t\t\ticons.getGrayOrangeStarBitmap(),\r\n\t\t\ticons.getYellowOrangeStarBitmap()]\r\n\t\tself.SetStars()\r\n\r\n\t\t# Set image info\r\n\t\tself.textCtrlImageInfo.Clear()\r\n\t\tself.textCtrlImageInfo.WriteText(self._configtmp[\"imageinfo\"])\r\n\r\n\t\t# If login still valid, change text on Sign In page\r\n\t\tif (self._configtmp[\"userhash\"] != \"00000000000000000000000000000000\"):\r\n\t\t\tself.staticTextSignedIn.SetLabel(\" You are signed in.\")\r\n\t\telse:\r\n\t\t\tself.staticTextSignedIn.SetLabel(\"You are not signed in.\")\r\n\t\tusername = self._configtmp[\"username\"]\r\n\t\tself.textCtrlLogin.Clear()\r\n\t\tself.textCtrlLogin.WriteText(username)\r\n\r\n\t\t# Set options\r\n\t\t_ratelist = [1, 2, 3, 4, 5]\r\n\t\tself.choiceOptionRatedAtLeast.SetSelection(\r\n\t\t\t_ratelist.index(self._configtmp[\"ratedatleast\"]))\r\n\t\t_percentlist = [5, 10, 20, 50, 75, 100]\r\n\t\tself.choiceOptionPercentUnrated.SetSelection(\r\n\t\t\t_percentlist.index(self._configtmp[\"percentnew\"]))\r\n\t\t_changeeverylist = [900, 1800, 3600, 7200, 14400, 28800, 86400,\r\n\t\t\t172800, 345600, 604800]\r\n\t\tself.choiceOptionChangeEvery.SetSelection(\r\n\t\t\t_changeeverylist.index(self._configtmp[\"changeevery\"]))\r\n\r\n\t\t# Update complete, show frame\r\n\t\tself.Show()\r\n\t\tself.Raise()",
"def updateUsers(self):\n self.__redrawUsers()\n self.__update()",
"def update(self):\n self.__redrawChat()\n self.__redrawUsers()\n self.__redrawChannels()\n self.__update()",
"def iniciaUI(self):\n\n self.setGeometry(100,100, 250, 250)\n self.setWindowTitle(\"Login\")\n self.displayWidgets()\n\n self.show()",
"def click_login_button(self):",
"def win2_Login(event=None):\r\n\r\n global IMAGES_FILE_PATH\r\n global bool_credentials_confirmed\r\n bool_credentials_confirmed = False\r\n\r\n clearwin()\r\n login_screen = mframe\r\n\r\n tkinter.Label(login_screen, text=\"Please enter your instagram details\", bg='pink', width=200,\r\n font=(\"Courier\", 20), pady=50).pack()\r\n login_screen.place(relx=0.5, rely=0.3, anchor='center')\r\n tkinter.Label(login_screen, text=\"\", bg='pink').pack()\r\n\r\n global username\r\n global password\r\n username = tkinter.StringVar()\r\n password = tkinter.StringVar()\r\n\r\n global username_login_entry\r\n global password_login_entry\r\n\r\n tkinter.Label(login_screen, text=\"Username \", bg='pink').pack()\r\n username_login_entry = tkinter.Entry(login_screen, textvariable=username, bg='pink', width=33,\r\n font=(\"Courier\", 13))\r\n username_login_entry.pack()\r\n tkinter.Label(login_screen, text=\"\", bg='pink').pack()\r\n tkinter.Label(login_screen, text=\"Password \", bg='pink').pack()\r\n password_login_entry = tkinter.Entry(login_screen, textvariable=password, show='*', bg='pink', width=33,\r\n font=(\"Courier\", 13))\r\n password_login_entry.pack()\r\n tkinter.Label(login_screen, text=\"\", bg='pink').pack()\r\n\r\n tkinter.Button(login_screen, text=\"Login\", width=10, height=1, command=login_verify, bg='pink').pack()\r\n tkinter.Button(mframe, text='Back', width=10, height=1, command=win1, bg='pink').pack()",
"def displayWindow(self, login, name, window, useOldState = False):\n\t\twindow.setName(name)\n\t\twindow.setUser(login)\n\t\twindow.setWindowManager(self)\n\t\tself.__addWindow(login, name, window, useOldState)\n\t\tml = window.getManialink()\n\t\tself.displayMl(ml, name, login)",
"def updateSettingsUI(self):\n\n pass",
"def LogIn():\n result, user, date = self.CheckLogin()\n if result:\n # Save the database file and load the program\n self.SaveData()\n self.main_frame.destroy()\n MainWindow.MainWindow(self, user, login_date=date)",
"def LoadCreateAccountWindow(self):\n \n def CreateAccount():\n \"\"\"Call the function to create a new account and log in.\n \n This function is called by the 'Create Account' button. If the\n user provided a valid username and the password fields match,\n the account will be created and the user will be asked to log\n into the account. If the user declines, the window resets back\n to the initial 'Log In' window.\n \"\"\"\n \n if not self.CreateAccount():\n return\n \n # Offer to log the new user account in\n ask = messagebox.askyesno('Success!',\n f'Account created. Log in as {username}?')\n if ask:\n # Save data to the file and load the main program\n self.SaveData()\n self.main_frame.destroy()\n MainWindow.MainWindow(self, username, login_date=None)\n else:\n # Clear variable fields and return to initial 'Log In' window\n self.username.set('')\n self.password.set('')\n self.confirm_pass.set('')\n Return()\n \n def Return():\n \"\"\"Erase 'Account Creation' widgets to load 'Log In' widgets.\n \n This function is called by the 'Return To Log In' button.\n \"\"\"\n confirm_frame.forget()\n self.LoadLogInWindow()\n \n confirm_frame = Widgets.CreateFrame(self.main_canvas)\n self.main_canvas.itemconfigure(self.canvas_window,\n window=confirm_frame)\n \n # Create a button to create the account and a button to return to\n # the 'Log In' window\n button_frame = Widgets.CreateFrame(confirm_frame)\n button_frame.pack(side='bottom')\n \n create_button = Widgets.CreateButton(button_frame,\n _text='Create Account',\n _cmd=CreateAccount,\n _height=self.button_height)\n create_button.pack(side='left')\n return_button = Widgets.CreateButton(button_frame,\n _text='Return To\\nLog In',\n _cmd=Return,\n _height=self.button_height)\n return_button.pack(side='right')\n \n # Set the password fields to monitor if they match and update a label\n # to show their status\n self.pass_match_label = Widgets.CreateLabel(confirm_frame, _text='',\n _font=('georgia', 8))\n self.pass_match_label.pack(side='bottom')\n self.password.trace('w', self.PasswordMatch)\n self.confirm_pass.trace('w', self.PasswordMatch)\n self.PasswordMatch()\n \n # Create the 'Confirm Password', 'Password', and 'Username' fields\n widget_list = []\n confirm_entry = Widgets.CreateEntry(confirm_frame,\n _var=self.confirm_pass, _show='*')\n confirm_entry.pack(side='bottom')\n widget_list.append(confirm_entry)\n confirm_label = Widgets.CreateLabel(confirm_frame,\n _text='Confirm Password:')\n confirm_label.pack(side='bottom')\n \n pass_entry = Widgets.CreateEntry(confirm_frame,\n _var=self.password, _show='*')\n pass_entry.pack(side='bottom')\n widget_list.append(pass_entry)\n pass_label = Widgets.CreateLabel(confirm_frame, _text='Password:')\n pass_label.pack(side='bottom')\n \n user_entry = Widgets.CreateEntry(confirm_frame, _var=self.username)\n user_entry.pack(side='bottom')\n widget_list.append(user_entry)\n user_label = Widgets.CreateLabel(confirm_frame, _text='Username:')\n user_label.pack(side='bottom')\n \n # Entry fields are created bottom-to-top and their order in the window\n # stack needs to be reversed and lifted so that 'Tab' navigates from\n # top-to-bottom\n widget_list.reverse()\n for widget in widget_list:\n widget.lift()",
"def OnButtonLoginOKButton(self, event):\r\n\t\tself.OnButtonOKButton()",
"def OnSubmit(self, event):\n chat_login.UserText = self.txt_Username.GetValue()\n chat_login.PasswordText = self.txt_Password.GetValue()\n flag=authenticate.authenticate(chat_login.UserText,chat_login.PasswordText)\n if flag==0:\n self.frame.SetStatusText(\"Sorry\")\n elif flag==2:\n self.frame.SetStatusText(\"Username not registered\")\n elif flag==1:\n self.frame.Close(True)\n app = wx.App(False)\n OnlineOfflineScreen(chat_login.UserText, chat_login.PasswordText)\n OnlineOfflineThread()\n\n app.MainLoop()",
"def update_(self):\n self.update_listbox()\n self.update_infobox()\n self.update_statusbar()\n self.listbox.select_set(0)\n self.listbox.focus_set()",
"def update_account_view():\n\n username = pygame_textinput.TextInput()\n oldPassword = pygame_textinput.TextInput()\n password = pygame_textinput.TextInput()\n login_view = pygame.image.load(\"images/menu/update-account.png\")\n surface.fill(COLOR_BACKGROUND)\n\n selected = \"username\"\n \n while True:\n # Clock tick\n clock.tick(60)\n\n # Application events\n playevents = pygame.event.get()\n\n for e in playevents:\n if e.type == QUIT:\n exit()\n elif e.type == KEYDOWN:\n if e.key == K_ESCAPE and main_menu.is_disabled():\n account_login_view()\n return\n elif e.type == MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n print(mouse_pos)\n if mouse_pos[0] in range(190,610) and mouse_pos[1] in range(148,210):\n selected = \"username\"\n elif mouse_pos[0] in range(190,610) and mouse_pos[1] in range(235,298):\n selected = \"oldPassword\"\n elif mouse_pos[0] in range(190,610) and mouse_pos[1] in range(330,392):\n selected = \"password\"\n elif mouse_pos[0] in range(186,612) and mouse_pos[1] in range(430,477):\n # update account\n if len(oldPassword.get_text()) < 1 or len(username.get_text()) < 1 or len(password.get_text()) < 1:\n break\n update_account(username = username.get_text(), oldPassword = oldPassword.get_text(), password = password.get_text())\n return\n elif mouse_pos[0] in range(562,617) and mouse_pos[1] in range(62,77):\n # go back\n account_login_view()\n return\n \n if selected == \"username\":\n username.update(playevents) \n elif selected == \"oldPassword\":\n oldPassword.update(playevents, passProtect=True) \n elif selected == \"password\":\n password.update(playevents, passProtect=True)\n \n # blit information to the menu based on user input from above\n surface.blit(login_view, ((WINDOW_SIZE[0] - login_view.get_size()[0]) / 2, (WINDOW_SIZE[1] - login_view.get_size()[1]) / 2))\n if len(username.get_text()) >= 1:\n surface.blit(username.get_surface(), (250,170)) \n else:\n surface.blit(MY_FONT.render('Username', 1, COLOR_BLACK), (250,160)) \n if len(oldPassword.get_text()) >= 1:\n surface.blit(oldPassword.get_surface(), (250,257)) \n else:\n surface.blit(MY_FONT.render('Old Password', 1, COLOR_BLACK), (250,247)) \n if len(password.get_text()) >= 1:\n surface.blit(password.get_surface(), (250,352))\n else:\n surface.blit(MY_FONT.render('Password', 1, COLOR_BLACK), (250,342)) \n pygame.display.flip()\n\n return",
"def __init__(self, root):\n root.withdraw()\n\n self.widgets['Login Window'] = subwindow.draw_subwindow(root, self.w, self.h, self.title)\n\n self.widgets['Login Window'].protocol('WM_DELETE_WINDOW', lambda: subwindow.on_exit(root))\n\n self.widgets['Login Window'].grab_set()\n\n self.create_widgets(root)",
"def update_info(self):\n self.execution_status_widget.update()\n self.execution_info_widget.update()\n self.cluster_widget.update() # update the cluster info even if it is not being displayed\n self.details.original_widget.update()",
"def login(self):",
"def switchToLogin(self):\n self.username.setText(\"\")\n self.password.setText(\"\")\n self.lastView = None\n self.currentView = 0\n self.stacked.setCurrentIndex(0)\n self.show()"
] | [
"0.6385321",
"0.6359693",
"0.6340787",
"0.626191",
"0.625842",
"0.6205188",
"0.6185125",
"0.6001173",
"0.59811103",
"0.5976306",
"0.5903719",
"0.5897709",
"0.58765024",
"0.5859574",
"0.5857817",
"0.58435374",
"0.58405894",
"0.5815477",
"0.5792914",
"0.578056",
"0.57775563",
"0.5771277",
"0.5759278",
"0.5755804",
"0.574651",
"0.57133657",
"0.571236",
"0.5706576",
"0.5705098",
"0.57037115"
] | 0.80019075 | 0 |
Overriding toggle hidden function to also update values of this window's frames. | def toggle_hidden(self):
AbstractChild.toggle_hidden(self)
self.accFrame.update_values()
self.botFrame.update_values()
# On toggle hidden
self.on_toggle_hidden() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()",
"def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')",
"def do_hf_unhide(self, arg):\n self.show_hidden_frames = True\n self.refresh_stack()",
"def ToggleVisible(self, event):\n pass",
"def do_hf_hide(self, arg):\n self.show_hidden_frames = False\n self.refresh_stack()",
"def toggleWindowVisibility(*args, **kwargs)->None:\n pass",
"def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()",
"def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()",
"def hide( self, event=None ):\n self.visible = 0\n self.withdraw()",
"def hide( self, event=None ):\n self.visible = 0\n self.withdraw()",
"def toggle_window_visibility(self):\r\n if self.isHidden():\r\n self.show_window()\r\n self.visibilityAction.setText(self.hideWindowString)\r\n else:\r\n self.hide_window()\r\n self.visibilityAction.setText(self.showWindowString)",
"def window_info_toggle():\n window_info.hide() if window_info.showing else window_info.show()",
"def hide(self, event=None):\n self.visible = 0\n self.withdraw()",
"def ev_windowhidden(self, event: WindowEvent) -> None:",
"def unHide(self):\n self.visible = True",
"def toggle_visibility(self):\n if self.is_visible():\n self.hide()\n else:\n self.show()",
"def toggleShowInvisibles(self: Self, event: Event = None) -> None:\n c = self\n colorizer = c.frame.body.getColorizer()\n showInvisiblesHelper(c, not colorizer.showInvisibles)",
"def toggle_show_frame_number(self):\n if self.show_frame_num:\n self.show_frame_num = False\n self.btn_toggle_frame_num.config(text='Show frame num')\n else:\n self.show_frame_num = True\n self.btn_toggle_frame_num.config(text='Hide frame num')",
"def toggle(self):\n if bool(self.show.get()):\n self.sub_frame.pack(fill=\"x\", expand=1)\n self.toggle_button.configure(text='-')\n else:\n self.sub_frame.forget()\n self.toggle_button.configure(text='+')",
"def visible(self, show):",
"def setIsolateHidden( self, state ):\n self._isolatedHidden = state\n \n super(XNode, self).setVisible(self.isVisible())",
"def _onShow(self, event):\n# try:\n if len(self.frames) > 0:\n self.hidebutton.Show(True)\n else:\n self.hidebutton.Show(False)",
"def hidden(self, hidden):\n\n self._hidden = hidden",
"def toggle(self):\n self._show = not self._show\n if self._show:\n self._sub_frame.pack(fill=tk.X, expand=1)\n self._toggle_button.configure(text='-')\n else:\n self._sub_frame.forget()\n self._toggle_button.configure(text='+')",
"def toggle(self) -> None:\n if bool(self.show.get()):\n self.sub_frame.pack(fill=tk.X, expand=True)\n self.toggle_button.configure(text=self.sep[0])\n else:\n self.sub_frame.forget()\n self.toggle_button.configure(text=self.sep[1])",
"def hide(self):\n self.frame.grid_forget()\n self.visible = False",
"def hide(self):\r\n\t\tself.frame.Show(False)",
"def hide(self):\n self.visible = False",
"def toggle(self):",
"def toggle(self):\n if self.is_visible():\n self.hide()\n self._app[\"main_window\"].grab_focus()\n self._app[\"statusbar\"].update_info()\n elif self._app.get_paths() and \\\n self._app.get_focused_widget() not in [\"lib\", \"thu\"]:\n if os.path.islink(self._app.get_path()):\n self._app[\"statusbar\"].message(\n \"Manipulating symbolic links is not supported\", \"warning\")\n elif not edit_supported(self._app.get_path()):\n self._app[\"statusbar\"].message(\n \"This filetype is not supported\", \"warning\")\n else:\n self.show()\n self._pixbuf = self._app[\"image\"].get_pixbuf_original()\n self.sliders[\"bri\"].grab_focus()\n self._app[\"statusbar\"].update_info()\n else:\n if self._app[\"thumbnail\"].toggled:\n self._app[\"statusbar\"].message(\n \"Manipulate not supported in thumbnail mode\", \"warning\")\n elif self._app[\"library\"].is_focus():\n self._app[\"statusbar\"].message(\n \"Manipulate not supported in library\", \"warning\")\n else:\n self._app[\"statusbar\"].message(\"No image open to edit\",\n \"warning\")"
] | [
"0.7399713",
"0.72405815",
"0.7088366",
"0.69343615",
"0.68650293",
"0.66577417",
"0.6634899",
"0.6634899",
"0.66049457",
"0.66049457",
"0.65987873",
"0.657263",
"0.6546351",
"0.65328175",
"0.6531905",
"0.6456644",
"0.6416586",
"0.6407047",
"0.64038384",
"0.6393051",
"0.6379931",
"0.63653624",
"0.6280172",
"0.6258701",
"0.62509155",
"0.62307066",
"0.62100804",
"0.6163295",
"0.6162284",
"0.6160346"
] | 0.7843108 | 0 |
Update `durations`, a dict of dict of lists of pull requests. | def get_duration_data(durations, owner_repo="edx/edx-platform", since=None):
open_issues_generator = itertools.izip(
get_pulls(owner_repo, state="open", org=True),
itertools.repeat("open")
)
closed_issues_generator = itertools.izip(
get_pulls(owner_repo, state="closed", since=since, org=True),
itertools.repeat("closed")
)
for issue, state in itertools.chain(open_issues_generator, closed_issues_generator):
created_at = issue.created_at
if state == "open":
closed_at = datetime.utcnow()
else:
closed_at = issue.closed_at
issue.duration = closed_at - created_at
if DEBUG:
print("{pr.id}: {pr.intext} {state}".format(
pr=issue, state=state
), file=sys.stderr)
durations[state][issue.intext].append(issue) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_timers(self, delta):\n new_timers = {}\n for t in self._timers.keys():\n new_timers[t + delta] = self._timers[t]\n self._timers.clear()\n self._timers = new_timers",
"def __update_current_measure_durations(self, duration: int) -> None:\n total_duration = sum(self.current_measure_durations) + duration\n if total_duration < N_EIGHTHS_PER_MEASURE:\n self.current_measure_durations.append(duration)\n elif total_duration == N_EIGHTHS_PER_MEASURE:\n self.current_measure_durations = []\n else:\n syncopated_duration = total_duration - N_EIGHTHS_PER_MEASURE\n self.current_measure_durations = [syncopated_duration]",
"def refresh(self):\n for i in self.data:\n values = self.data[i]\n try:\n if values[\"state\"] == \"Teardown\":\n t_delta = (values[\"t_end\"] or values[\n \"date\"]) - values[\"ts\"]\n else:\n t_delta = values[\"date\"] - values[\"ts\"]\n\n if t_delta.total_seconds() < 0:\n t_delta = values[\"ts\"] - values[\"ts\"]\n values[\"duration\"] = str(t_delta.total_seconds())\n except:\n print sys.exc_info()\n # print values\n values[\"duration\"] = 0",
"def mergeAndSortIssues(dbd_issues, updated_issues):\n\n def ascendingTimeOrder(t1, t2):\n \"\"\"A custom comparator to order based on the difference in times in\n seconds. \"\"\"\n return cmp(t1['total_seconds'], t2['total_seconds'])\n\n def descendingTimeOrder(t1, t2):\n \"\"\"A custom comparator to order based on the hour and minute properties\n of the issues. Puts largest times first.\"\"\"\n return -cmp((t1['days'], t1['hours'], t1['minutes']),\n (t2['days'], t2['hours'], t2['minutes']))\n\n sorters = {\n \"SLA\": ascendingTimeOrder,\n \"FTS\": descendingTimeOrder,\n \"REV\": descendingTimeOrder,\n \"UNA\": descendingTimeOrder\n }\n\n for category in sorters:\n dbd_issues[category].extend(updated_issues[category])\n dbd_issues[category].sort(sorters[category])",
"def update_current_measure_durations(\n current_measure_durations: List[float], next_duration: float\n) -> List[float]:\n duration_of_one_measure = 1\n extended_duration = sum(current_measure_durations) + next_duration\n if extended_duration < duration_of_one_measure:\n current_measure_durations.append(next_duration)\n elif extended_duration == duration_of_one_measure:\n current_measure_durations = []\n else:\n syncopated_duration = extended_duration - duration_of_one_measure\n current_measure_durations = [syncopated_duration]\n return current_measure_durations",
"def process_feature_durations(self, docFeatList):\n \n for feat in docFeatList:\n tlink = feat.getTlink()\n if not tlink or tlink.getType()!='DURATION':\n continue\n# \n tdurs = [t for t in tlink.getTimexes() if t.getType()=='DUR']\n if not tdurs: continue\n \n timexes = [t for t in tlink.getTimexes() if t.getDateTime()]\n if not timexes: continue\n \n tdur = tdurs[0]\n \n if len(timexes)==1:\n dt = timexan.getRelativeDatetime(tdur.getString(), 'after', timexes[0].getDateTime())\n tdur.setDateTime(dt) \n tlink.setType('BETWEEN')\n continue\n \n timexes2 = []\n for t in timexes:\n dt = timexan.getRelativeDatetime(tdur.getString(), 'after', t.getDateTime())\n timexes2.append(timexan.Timex3(0, 0, '', dt, tdur.getString()))\n tlink.setTimexes2(timexes2)\n tlink.setType('MULTI_DURATIONS')\n \n return docFeatList",
"def _update_(self, data):\n # imports\n from intervals import Interval, LinkedIntervalSet\n \n # create a LinkedIntervalSet to store the time-based states for this\n # hero\n states = [Interval(dict((HER_KWD_MAP.get(k, k), d[1][k]) for k in d[1]), d[0]) for d in data]\n self.states = LinkedIntervalSet.from_starts(states)",
"def schedule_pulls():\n for repo in Repository.objects.all():\n # distribute pulls by delaying evenly across an hour\n pk_hash = hashlib.sha256(str(repo.pk).encode())\n delay_s = int(pk_hash.hexdigest(), 16) % (60 * 60)\n\n pull.apply_async(\n args=(repo.remote_id, repo.provider),\n countdown=delay_s,\n expires=delay_s + (60 * 60),\n )",
"def update_view_times(app):\n app.logger.info('Scheduler update_view_times running: %s' % post_view_times_counter)\n d = dict(post_view_times_counter)\n post_view_times_counter.clear()\n for k, v in d.items():\n p = Post.find_one({'_id': k})\n if p:\n try:\n p.viewTimes += v\n p.save()\n except:\n app.logger.exception('Failed when updating the viewTime for album %s' % p._id)",
"def progress_bar_update() -> str:\n # As we get updates only when the progress bar is updated we need to fix the 'duration' and 'time remaining' parts\n # (time never stops)\n now = datetime.now()\n result = []\n for pb_id in sorted(_DASHBOARD_TQDM_DICT.keys()):\n progress = _DASHBOARD_TQDM_DICT.get(pb_id)\n if progress['success'] and progress['n'] != progress['total']:\n progress['duration'] = str(now - progress['started_raw']).rsplit('.', 1)[0]\n progress['remaining'] = (str(progress['finished_raw'] - now).rsplit('.', 1)[0]\n if progress['finished_raw'] is not None and progress['finished_raw'] > now\n else '-')\n result.append(progress)\n\n return jsonify(result=result)",
"def pull_requests_model(self, entry_info, repo_id):\n github_url = entry_info['given']['github_url']\n\n logging.info('Beginning collection of Pull Requests...\\n')\n logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\\n')\n record_model_process(self, repo_id, 'pull_requests')\n\n owner, repo = self.get_owner_repo(github_url)\n\n url = (f'https://api.github.com/repos/{owner}/{repo}/pulls?state=all&' +\n 'direction=asc&per_page=100&page={}')\n\n # Get pull requests that we already have stored\n # Set pseudo key (something other than PK) to \n # check dupicates with\n table = 'pull_requests'\n table_pkey = 'pull_request_id'\n update_col_map = {'pr_src_state': 'state'} \n duplicate_col_map = {'pr_src_id': 'id'}\n\n #list to hold pull requests needing insertion\n prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, \n where_clause='WHERE repo_id = {}'.format(repo_id),\n value_update_col_map={'pr_augur_contributor_id': float('nan')})\n\n # Discover and remove duplicates before we start inserting\n logging.info(\"Count of pull requests needing update or insertion: \" + str(len(prs)) + \"\\n\")\n\n for pr_dict in prs:\n\n pr = {\n 'repo_id': repo_id,\n 'pr_url': pr_dict['url'],\n 'pr_src_id': pr_dict['id'],\n 'pr_src_node_id': None,\n 'pr_html_url': pr_dict['html_url'],\n 'pr_diff_url': pr_dict['diff_url'],\n 'pr_patch_url': pr_dict['patch_url'],\n 'pr_issue_url': pr_dict['issue_url'],\n 'pr_augur_issue_id': None,\n 'pr_src_number': pr_dict['number'],\n 'pr_src_state': pr_dict['state'],\n 'pr_src_locked': pr_dict['locked'],\n 'pr_src_title': pr_dict['title'],\n 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']),\n 'pr_body': pr_dict['body'],\n 'pr_created_at': pr_dict['created_at'],\n 'pr_updated_at': pr_dict['updated_at'],\n 'pr_closed_at': pr_dict['closed_at'],\n 'pr_merged_at': pr_dict['merged_at'],\n 'pr_merge_commit_sha': pr_dict['merge_commit_sha'],\n 'pr_teams': None,\n 'pr_milestone': pr_dict['milestone']['title'] if pr_dict['milestone'] else None,\n 'pr_commits_url': pr_dict['commits_url'],\n 'pr_review_comments_url': pr_dict['review_comments_url'],\n 'pr_review_comment_url': pr_dict['review_comment_url'],\n 'pr_comments_url': pr_dict['comments_url'],\n 'pr_statuses_url': pr_dict['statuses_url'],\n 'pr_meta_head_id': None,\n 'pr_meta_base_id': None,\n 'pr_src_issue_url': pr_dict['issue_url'],\n 'pr_src_comments_url': pr_dict['comments_url'], # NOTE: this seems redundant\n 'pr_src_review_comments_url': pr_dict['review_comments_url'], # this too\n 'pr_src_commits_url': pr_dict['commits_url'], # this one also seems redundant\n 'pr_src_statuses_url': pr_dict['statuses_url'],\n 'pr_src_author_association': pr_dict['author_association'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 'GitHub API'\n }\n\n if pr_dict['flag'] == 'need_insertion':\n logging.info(f'PR {pr_dict[\"id\"]} needs to be inserted\\n')\n\n result = self.db.execute(self.pull_requests_table.insert().values(pr))\n logging.info(f\"Added Pull Request: {result.inserted_primary_key}\")\n self.pr_id_inc = int(result.inserted_primary_key[0])\n\n elif pr_dict['flag'] == 'need_update':\n result = self.db.execute(self.pull_requests_table.update().where(\n self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr))\n logging.info(\"Updated tuple in the pull_requests table with existing pr_src_id: {}\".format(\n pr_dict['id']))\n self.pr_id_inc = pr_dict['pkey']\n\n else:\n logging.info(\"PR does not need to be inserted. 
Fetching its id from DB\")\n pr_id_sql = s.sql.text(\"\"\"\n SELECT pull_request_id FROM pull_requests\n WHERE pr_src_id={}\n \"\"\".format(pr_dict['id']))\n\n self.pr_id_inc = int(pd.read_sql(pr_id_sql, self.db).iloc[0]['pull_request_id'])\n\n self.query_labels(pr_dict['labels'], self.pr_id_inc)\n self.query_pr_events(owner, repo, pr_dict['number'], self.pr_id_inc)\n self.query_pr_comments(owner, repo, pr_dict['number'], self.pr_id_inc)\n self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc)\n self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc)\n\n logging.info(f\"Inserted PR data for {owner}/{repo}\")\n self.results_counter += 1\n\n register_task_completion(self, entry_info, repo_id, 'pull_requests')",
"def duration_filter(ids_and_durations, minute_threshold):\n delta_sec = minute_threshold * 60 # Threshold passed in second to fit 'timedelta'.\n\n short_videos = [video[0] for video in ids_and_durations if video[1] <= timedelta(seconds=delta_sec)]\n long_videos = [video[0] for video in ids_and_durations if video[1] > timedelta(seconds=delta_sec)]\n\n to_print = f\"- END OF THE RETRIEVING PROCESS -\\n\\n\" \\\n f\"Number of short videos: {len(short_videos)}\\n\" \\\n f'{pformat([f\"https://www.youtube.com/watch?v={element[0]}\" for element in short_videos])}\\n\\n' \\\n f\"Number of long videos: {len(long_videos)}\\n\" \\\n f'{pformat([f\"https://www.youtube.com/watch?v={element[0]}\" for element in long_videos])}\\n'\n\n print(to_print)\n\n return {\"short_videos\": short_videos, \"long_videos\": long_videos, \"logs\": to_print}",
"def talk_durations(self):\n c = self.conn.cursor()\n\n c.execute('''SELECT strftime('%s', MAX(`date`)) - strftime('%s', MIN(`date`)) AS duration\n FROM messages\n WHERE gmail_labels LIKE '%Chat%'\n GROUP BY gmail_thread_id\n HAVING duration > 0;''')\n\n data = {'<= 1 min.': 0, '1 - 10 mins.': 0,\n '10 - 30 mins.': 0, '30 mins. - 1 hr.': 0,\n '> 1 hr.': 0}\n for row in c.fetchall():\n if row[0] <= 60:\n data['<= 1 min.'] += 1\n elif row[0] <= 600:\n data['1 - 10 mins.'] += 1\n elif row[0] <= 1800:\n data['10 - 30 mins.'] += 1\n elif row[0] <= 3600:\n data['30 mins. - 1 hr.'] += 1\n else:\n data['> 1 hr.'] += 1\n\n trace = pgo.Pie(\n labels=data.keys(),\n values=data.values(),\n marker=dict(\n colors=[\n self.config.get('color', 'primary'),\n self.config.get('color', 'secondary'),\n ]\n )\n )\n\n layout_args = plotly_default_layout_options()\n layout_args['title'] = 'Chat Durations'\n del layout_args['xaxis']\n del layout_args['yaxis']\n\n layout = pgo.Layout(**layout_args)\n\n return plotly_output(pgo.Figure(data=[trace], layout=layout))",
"def toss_review_duration(repo_id, begin_date=None, end_date=None):\n if not begin_date:\n begin_date = '1970-1-1 00:00:01'\n if not end_date:\n end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n pr_acceptance_rate_sql = s.sql.text(\"\"\"\n SELECT SUM\n ( EXTRACT ( EPOCH FROM ( pr_merged_at - pr_created_at ) ) ) / COUNT ( * ) AS duration\n FROM\n pull_requests\n JOIN repo ON pull_requests.repo_id = repo.repo_id\n WHERE\n pull_requests.repo_id = :repo_id\n AND pr_merged_at IS NOT NULL\n AND pr_created_at BETWEEN :begin_date\n AND :end_date\n \"\"\")\n \n results = pd.read_sql(pr_acceptance_rate_sql, engine, params={'repo_id': repo_id,\n 'begin_date': begin_date, 'end_date': end_date})\n if results.iloc[0]['duration'] is None:\n results.iloc[0]['duration'] = -1\n else:\n results.iloc[0]['duration'] = results.iloc[0]['duration'] / 60 / 60 / 24\n return results",
"def update_query(self, **updates):\r\n self._url_updates.update(updates)",
"def get_ratio_metrics(\n ratio_metric_specs: Dict[iter8id, RatioMetricSpec], \n counter_metric_specs: Dict[iter8id, CounterMetricSpec], \n counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]], \n versions: Iterable[Version],\n start_time: datetime) -> Dict[iter8id, Dict[iter8id, RatioDataPoint]]:\n rmd = {version.id: {} for version in versions} # initialize rmd\n\n # populate rmd\n for ratio_metric_spec in ratio_metric_specs.values():\n query_spec = RatioQuerySpec(\n version_label_keys = versions[0].version_labels.keys(),\n numerator_template = counter_metric_specs[ratio_metric_spec.numerator].query_template,\n denominator_template = counter_metric_specs[ratio_metric_spec.denominator].query_template,\n start_time = start_time\n )\n prmq = PrometheusRatioMetricQuery(query_spec, versions)\n current_time = datetime.now(timezone.utc)\n rmd_from_prom = prmq.query_from_spec(current_time)\n\n for version in versions:\n if version.id in rmd_from_prom:\n rmd[version.id][ratio_metric_spec.id] = rmd_from_prom[version.id]\n else:\n if version.id in counter_metrics and counter_metrics[version.id][ratio_metric_spec.denominator].value:\n rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(\n value = 0,\n timestamp = current_time,\n status = StatusEnum.zeroed_ratio\n )\n else:\n rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(\n value = None,\n timestamp = current_time,\n status = StatusEnum.absent_version_in_prom_response\n )\n \"\"\"if a version cannot be found in the list of ratio metrics returned by prometheus, then the value of the ratio is set to zero if denominator is non-zero, and is set to None otherwise.\n \"\"\"\n\n return rmd",
"def update_timer_details(twitchid, id, *, title, delta, maxtime, styling):\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"update mustard.timers set title=%s, delta=%s, maxtime=%s, styling=%s where id=%s and twitchid=%s\",\n\t\t\t(title, delta, maxtime, styling, id, twitchid))\n\t\tif not cur.rowcount: raise ValueError(\"Timer not found, or not owned by that user\")",
"def _create_pull_requests_descriptions(self, pull_requests: list) -> list:\n descriptions = []\n for pull_request in pull_requests:\n description = deepcopy(self.blocks['description'])\n reviewers = map(\n lambda name: self._get_user_mention(name),\n pull_request['reviewers'],\n )\n description['text']['text'] = ' '.join(reviewers)\n description['accessory']['url'] = pull_request['url']\n descriptions.append(description)\n return descriptions",
"def set_duration(self, duration_secs):\n raise UnsupportedOperation('Duration not implemented')",
"def update(self, runningrates, rspec):\n # cache share for later comparison\n runningrates['share'] = self.Share\n\n # Query Node Manager for max rate overrides\n self.updateSliceTags(rspec)\n\n usedbytes = runningrates['usedbytes']\n usedi2bytes = runningrates['usedi2bytes']\n\n # Check limits.\n if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):\n sum = self.bytes + (self.ThreshKByte * 1024)\n maxbyte = self.MaxKByte * 1024\n bytesused = usedbytes - self.bytes\n timeused = int(time.time() - self.time)\n # Calcuate new rate. in bit/s\n new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))\n # Never go under MinRate\n if new_maxrate < (self.MinRate * 1000):\n new_maxrate = self.MinRate * 1000\n # State information. I'm capped.\n self.capped += True\n else:\n # Sanity Check\n new_maxrate = self.MaxRate * 1000\n self.capped += False\n\n if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):\n maxi2byte = self.Maxi2KByte * 1024\n i2bytesused = usedi2bytes - self.i2bytes\n timeused = int(time.time() - self.time)\n # Calcuate New Rate.\n new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))\n # Never go under MinRate\n if new_maxi2rate < (self.Mini2Rate * 1000):\n new_maxi2rate = self.Mini2Rate * 1000\n # State information. I'm capped.\n self.capped += True\n else:\n # Sanity\n new_maxi2rate = self.Maxi2Rate * 1000\n self.capped += False\n\n # Check running values against newly calculated values so as not to run tc\n # unnecessarily\n if (runningrates['maxrate'] != new_maxrate) or \\\n (runningrates['minrate'] != self.MinRate * 1000) or \\\n (runningrates['maxexemptrate'] != new_maxi2rate) or \\\n ('minexemptrate' in runningrates and runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \\\n (runningrates['share'] != self.Share):\n # Apply parameters\n bwlimit.set(xid = self.xid, dev = dev_default,\n minrate = self.MinRate * 1000,\n maxrate = new_maxrate,\n minexemptrate = self.Mini2Rate * 1000,\n maxexemptrate = new_maxi2rate,\n share = self.Share)\n\n # Notify slice\n if self.capped == True:\n self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)",
"def update_stats(self, responses, no_responses):\n slowest_rtt = 0.0\n slowest_ip = None\n fastest_rtt = 9999999.9\n fastest_ip = None\n rtt_total = 0.0\n\n for ip, rtt in responses.items():\n rtt_total += rtt\n if rtt > slowest_rtt:\n slowest_rtt = rtt\n slowest_ip = ip\n elif rtt < fastest_rtt:\n fastest_rtt = rtt\n fastest_ip = ip\n\n sorted_rtts = sorted(responses.values())\n l = len(sorted_rtts)\n if l == 0:\n median_rtt = 0.0\n elif l % 2 == 1:\n # Odd number: Median is the middle element\n median_rtt = sorted_rtts[int(l / 2)]\n else:\n # Even number (average between two middle elements)\n median_rtt = (sorted_rtts[int(l / 2) - 1] +\n sorted_rtts[int(l / 2)]) / 2.0\n\n now = datetime.datetime.now().isoformat()\n m = {\n \"time\" : now,\n \"num_responses\" : len(responses),\n \"num_no_responses\" : len(no_responses),\n \"slowest\" : {\n \"ip\" : slowest_ip,\n \"rtt\" : slowest_rtt\n },\n \"fastest\" : {\n \"ip\" : fastest_ip,\n \"rtt\" : fastest_rtt\n },\n \"average_rtt\" : rtt_total / len(responses),\n \"median_rtt\" : median_rtt\n }\n\n self.measurements.insert(0, m)\n self.measurements = self.measurements[:self.max_num_measurements]",
"def update(ID, **updates):\n # Filter out any None values.\n review_updates = {k:v for k,v in updates.items() if v}\n\n if len(review_updates) > 0:\n data = json_encode(review_updates)\n gh_request('POST', '/repos/:user/:repo/pulls/:id', uri_vars={'id': ID}, body=data)\n printers.print_review_updated()",
"def update_statuses(interval: int, devices_list: list):\n s.enter(interval, 0, update_statuses, (interval, devices_list))\n\n for device in devices_list:\n update_status(device)\n\n # Zrownoleglone odswiezanie statusu\n # with concurrent.futures.ThreadPoolExecutor(4) as executor:\n # executor.map(update_status, devices_list)",
"def update_completion_time(tests_dataframe):\r\n tests_dataframe['time_test_arrives_lab'] = pd.to_datetime(tests_dataframe['time_test_arrives_lab'])\r\n hours = 5\r\n processing_time = datetime.timedelta(hours = hours)\r\n tests_dataframe['completion_time'] = tests_dataframe['time_test_arrives_lab'] + processing_time\r\n return tests_dataframe",
"def _addTiming(self, key, duration):\n pass",
"def __init__(\n self,\n last_modified: dt.datetime,\n expires: dt.datetime,\n updated_at: dt.datetime,\n units: Dict[str, str],\n intervals: List[Interval],\n ):\n self.last_modified = last_modified\n self.expires = expires\n self.updated_at = updated_at\n self.units = units\n self.intervals = intervals",
"def __getHoldingsUpdate(self, dirPath=None):\n retD = {}\n dirPath = dirPath if dirPath else self.__sandboxPath\n try:\n updateTypeList = [\"added\", \"modified\", \"obsolete\"]\n contentTypeList = [\"entries\", \"mr\", \"cs\", \"sf\", \"nef\", \"nmr-str\"]\n contentNameD = {\n \"entries\": \"coordinates\",\n \"mr\": \"NMR restraints\",\n \"cs\": \"NMR chemical shifts\",\n \"sf\": \"structure factors\",\n \"nef\": \"Combined NMR data (NEF)\",\n \"nmr-str\": \"Combined NMR data (NMR-STAR)\",\n }\n #\n for updateType in updateTypeList:\n uD = {}\n for contentType in contentTypeList:\n fp = os.path.join(dirPath, \"update-lists\", updateType + \"-\" + contentType)\n if not self.__mU.exists(fp):\n continue\n entryIdL = self.__mU.doImport(fp, \"list\")\n #\n for entryId in entryIdL:\n entryId = entryId.strip().upper()\n uD.setdefault(entryId, []).append(contentNameD[contentType])\n for entryId in uD:\n uType = \"removed\" if updateType == \"obsolete\" else updateType\n # retD[entryId] = {\"update_id\": updateId, \"entry_id\": entryId, \"update_type\": uType, \"repository_content_types\": uD[entryId]}\n retD[entryId] = {\"update_type\": uType, \"repository_content_types\": uD[entryId]}\n return retD\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return retD",
"def _gather_durations(ret, minion_id):\n if isinstance(ret.data, dict) and isinstance(\n ret.data.get(minion_id, None), dict\n ):\n duration = 0\n for _, state_ret in ret.data[minion_id].items():\n try:\n duration += state_ret[\"duration\"]\n except KeyError:\n break\n else:\n return duration\n pytest.skip(\"Something went wrong with the states, skipping.\")",
"def push_rspecs(host, auth, rspecs):\n for rspec in rspecs:\n description = rspec[\"fields\"][\"description\"]\n click.echo(f\"Pushing {rspec['key']} \", err=True)\n data = {\n \"update\": {\n \"description\": [\n {\n \"set\": description\n }\n ],\n }\n }\n result = requests.put(\n f\"{host}/rest/api/latest/issue/{rspec['key']}\",\n json=data,\n auth=auth\n )\n result.raise_for_status()",
"def update_rate_limit_status(self):\n headers = {\"Authorization\": self._get_authorization_header()}\n res = requests.get(f\"{BASE_URL}/rate_limit\", headers=headers)\n if res.status_code != 200:\n log.abort_and_exit(\"GHUB\", f\"Failed to update rate limit status, status code {res.status_code}.\")\n data = res.json()[\"rate\"]\n self._rate_limit_status = {\n \"limit\": int(data[\"limit\"]),\n \"used\": int(data[\"used\"]),\n \"remaining\": int(data[\"remaining\"]),\n \"reset_at_utc\": int(data[\"reset\"]),\n \"reset_in_sec\": int(data[\"reset\"] - round(time.time())),\n \"last_update\": round(time.time())\n }"
] | [
"0.53073287",
"0.5146095",
"0.51271504",
"0.4886934",
"0.48314863",
"0.4814732",
"0.48069423",
"0.47682565",
"0.4729387",
"0.46937558",
"0.46904564",
"0.46614078",
"0.46457514",
"0.4609241",
"0.459688",
"0.45498896",
"0.45457125",
"0.45383173",
"0.4523992",
"0.45236656",
"0.44871238",
"0.44831178",
"0.44726533",
"0.44642082",
"0.4463765",
"0.44429383",
"0.44368672",
"0.44361842",
"0.44249576",
"0.44160968"
] | 0.64028376 | 0 |
Add a protein with a PDB, code and | def add_prot(code, target, xtal_path, xtal, input_dict):
# Split code by : before the get or create operation and use the first part of the name (split[0])
# code is normally the xtal directory in the aligned folder, but this may have been modified to have
# an alternate name added to it - in the form 'directory:alternate_name'.
code_first_part = code.split(":")[0]
proteins = Protein.objects.filter(code__contains=code_first_part, target_id=target)
if proteins.exists():
new_prot = proteins.first()
logger.debug("Pre-existing Protein (%s)", new_prot)
else:
new_prot = Protein.objects.get_or_create(code=code, target_id=target)
logger.debug("New Protein (code='%s' target_id='%s')", code, target)
new_prot = new_prot[0]
new_prot.apo_holo = True
# Check filepaths of all associated files.
filepaths = {
'pdb_info': ('pdbs', get_path_or_none(xtal_path, xtal, input_dict, "APO")),
'bound_info': ('bound', get_path_or_none(xtal_path, xtal, input_dict, "BOUND")),
'cif_info': ('cifs', get_path_or_none(xtal_path, xtal, input_dict, "CIF")),
'mtz_info': ('mtzs', get_path_or_none(xtal_path, xtal, input_dict, "MTZ")),
'map_info': ('maps', get_path_or_none(xtal_path, xtal, input_dict, "PMAP")),
'sigmaa_info': ('maps', get_path_or_none(xtal_path, xtal, input_dict, "SIGMAA")),
'diff_info': ('maps', get_path_or_none(xtal_path, xtal, input_dict, "DIFF")),
'event_info': ('maps', get_path_or_none(xtal_path, xtal, input_dict, "EVENT")),
'trans_matrix_info': ('trans', get_path_or_none(xtal_path, xtal, input_dict, "TRANS")),
'pdb_header_info': ('pdbs', get_path_or_none(xtal_path, xtal, input_dict, "HEADER")),
'apo_desolve_info': ('pdbs', get_path_or_none(xtal_path, xtal, input_dict, "DESOLV")),
}
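    # Copy each file that actually exists into default storage and record the saved path on the Protein.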
to_unpack = {k: v for k, v in filepaths.items() if v[1] is not None}
for key in to_unpack.keys():
save_path = os.path.join(to_unpack[key][0], to_unpack[key][1].split('/')[-1])
path = default_storage.save(save_path, open(to_unpack[key][1], 'rb'))
setattr(new_prot, key, path)
new_prot.save()
return new_prot | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_protein( self, protein ):\n v = get_vertex( protein )\n if v: return v # already added",
"def dna_to_protein(seq):\n\n # Verify a convertible sequence\n if len(seq) % 3 != 0:\n raise RuntimeError('Total number of bases must be a multiple of 3')\n\n # Iterate through adding the proteins\n protein = ''\n for i in range(0, len(seq), 3):\n protein += bioinfo_dicts.codons[seq[i:i+3]]\n return protein",
"def add_program(prog_name, prog_desc, cmd_line_prefix) :\n\n cur = conn.cursor() # database table cursor\n\n # insert the new program into programs table\n cur.execute(\"INSERT INTO programs (program_name, description, cmd_line_prefix) VALUES (?, ?, ?)\",\n (prog_name, prog_desc, cmd_line_prefix) )\n\n # finalize the database data addition\n conn.commit()",
"def ProteinRead(pdb_file, Include_dAA = True, IncludeWATER = False):\n # structure from input file or fetched if not present\n if(pdb_file[-4:] == '.pdb' or pdb_file[-3:] == '.gz'):\n ppdb = PandasPdb().read_pdb(pdb_file)\n else:\n ppdb = PandasPdb().fetch_pdb(pdb_file)\n \n # lists for standard and d-AA used to save structure to dataset \n standardAA = ['ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n d_AA = ['DAL','DAR','DSG','DAS','DCY','DGN','DGL','GLY','DHI','DIL','DLE','DLY','MED','DPN','DPR','DSN','DTH','DTR','DTY','DVA']#scan takes into account only standard amino acids\n\n for aa in standardAA: #ATOM entries, excluding water molecules \n if(aa==standardAA[0]):\n ppdb_ATOM = ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == aa] \n else:\n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == aa]], ignore_index=True) \n\n if(Include_dAA):\n for i in range(0,len(d_AA)): \n if(d_AA[i]!='GLY'):\n ppdb_d_AA = pd.concat([ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == d_AA[i]],ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == d_AA[i]]], ignore_index=True)\n pd.options.mode.chained_assignment = None \n ppdb_d_AA['residue_name'].iloc[:] = standardAA[i] #dAA considered as standard one for scan \n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb_d_AA], ignore_index=True) \n\n ppdb_PROTEIN = ppdb_ATOM #protein atoms saved here \n ppdb_WATER = pd.concat([ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == 'HOH'],ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == 'HOH'],ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == 'WAT'],ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == 'WAT']], ignore_index=True) #oxygen atoms of water molecules\n #can be both HETATM (standard pdb file) or ATOM (vmd output)\n if(len(ppdb_WATER)>0 and IncludeWATER):\n pd.options.mode.chained_assignment = None \n ppdb_WATER['residue_name'].iloc[:] = 'HOH'\n ppdb_WATER['chain_id'].iloc[:] = 'water'\n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb_WATER], ignore_index=True)\n\n Chains = []\n for i in range(0,len(ppdb_ATOM)):\n if(ppdb_ATOM['chain_id'].iloc[i] in Chains):\n continue\n else:\n Chains.append(ppdb_ATOM['chain_id'].iloc[i]) \n return ppdb_ATOM, Chains",
"def AddPseudoCode(self, pcode):\n self.buffers[self.buffergrade].append(pcode)",
"def load(self, pdbfile, pdbcode=None, includeH=True):\n\n self.pdbfile = pdbfile\n if pdbcode is None:\n self.pdbcode = pdbfile\n else:\n self.pdbcode = pdbcode\n\n # Does not handle file-not-found exceptions: this is done up-front\n f = open (pdbfile, \"r\")\n lines = f.readlines ()\n f.close ()\n\n self.atomcoords = []\n self.atmnames = []\n self.atmsymbols = []\n self.resnames = []\n self.resnum = []\n self.atmkeys = []\n self._residueInfo = dict ()\n self._residuenames = dict ()\n self._atomInfo = []\n count = -1\n reH = re.compile ('H')\n for line in lines:\n line = line.strip ()\n\n # Filter for ATOM\n if not ((line[0:4] == 'ATOM')): continue\n coords = [float (line[30:38]), float (line[38:46]), float (line[46:54])]\n name = line[12:16]\n symbol = line[13:14]\n resname = line[17:20]\n resID = int (line[22:26])\n\n # Account for PDB files in which the element symbol is shifted from column 14\n # VMD writes such PDB files, ptraj does not\n # Fully compliant PDB files should also have the element in columns 77-78\n # Option \"nowrap\" to ptraj's \"trajout\" command may well control this behaviour\n if ((symbol != 'H') and reH.match (name)): symbol = 'H'\n if not includeH and (symbol == 'H'): continue\n count = count + 1\n\n self.atomcoords.append (coords)\n self.atmnames.append (name)\n self.atmsymbols.append (symbol)\n self.atmkeys.append (0)\n\n self.resnames.append (resname)\n self.resnum.append (resID)\n\n self._atomInfo.append (dict (id=count, coords=coords, name=name, symbol=symbol, residue=resID, residue_name=resname,key=0))\n if not self._residueInfo.has_key (resID):\n self._residueInfo[resID] = dict (id=resID, atomID=[], name=resname)\n self._residuenames[resID] = dict (id=resID, name=resname)\n self._residueInfo[resID]['atomID'].append (count)\n\n self.nAtoms = len (self.atmnames)\n self.nCoords = 3 * self.nAtoms\n self.framebytes = (self.nCoords) * 8 + (\n self.nCoords / 10 + 1) # Numeric fields + EOL characters (in crd format)\n if self.nCoords % 10 == 0: self.framebytes -= 1 # Special case if ncoords exactly divisible by 10\n self.moltype = None\n self.initialized = True",
"def make_aa_pdb(PA_seq, name):\n generic_to_specific_PA(PA_seq.upper(), name)\n gen_PA(name)\n os.system('vmd -dispdev text -e gen_%s.pgn'%name)",
"def main():\n\n # Define the names of required input files, and other main configuration variables\n protein_w_underscores = os.getcwd().split('/')[-1]\n protein = protein_w_underscores.replace('_', ' ')\n pdbfile = 'pdb_structure.pdb' # the name of the PDB file\n pdbchain = None # chain in pdbfile -- there is only one chain, so not relevant here\n seqfile = 'protseq.txt' # file containing the protein sequence\n ddgdatafile = 'ddG_data.txt' # file containing the literature-culled ddG values\n ddgdatafile_warning = False # warn if ddgdatafile has conflicting ddG values for a mutation\n alignment_file = \"uniref_alignment-gaps_lt_0.1-identities_gt_0.5.fasta\" # file with aligned sequences\n phylip_path = '/Users/bloom/phylip-3.67/exe/' # path to phylip phylogeny program\n\n # Define the names of files that will be created by the script if they do not already exist\n cupsatfile = 'CUPSAT_ddGs.txt' # contains the ddG values from CUPSAT\n treefile = \"tree.newick\" # phylogenetic tree created by phylip\n phylipsequencefile = \"phylip_sequence_file\" # phylip input sequence file\n phylipdistancefile = \"phylip_distance_file\" # phylip distance matrix\n pipsddgsfile = \"pips_ddgs.txt\" # pips ddgs file\n regularizingpriorpipsddgsfile = 'pips_ddgs_with_regularizing_priors.txt' # pips ddgs file calculated with regularizing priors\n hydrophobicitypriorpipsddgsfile = 'pips_ddgs_with_hydrophobicity_priors.txt' # pips ddgs file calculated with hydrophobicity priors\n\n # Begin execution of the program\n seq = open(seqfile).read().strip() # read in protein sequence\n\n # Get the ddG values from CUPSAT and store in the dictionary cupsat_ddgs. Note that\n # in this and all subsequent ddG dictionaries, the first residue is numbered as 0.\n print \"\\nObtaining CUPSAT ddG values...\"\n sys.stdout.flush()\n if os.path.isfile(cupsatfile): # ddG values already obtained, just read from file\n (datetime, cupsat_ddgs) = pips.ddg_inference.ReadDDGs(cupsatfile)\n print \"Read the stored CUPSAT values from %s from the file %s.\" % (datetime, cupsatfile)\n else: # we need to obtain the ddG values from the CUPSAT webserver\n datetime = time.asctime()\n print \"Beginning to calculate and download CUPSAT ddGs at %s...\" % datetime\n sys.stdout.flush()\n cupsat_ddgs = pips.cupsat.RunCUPSAT(pdbfile, seq, pdbchain)\n pips.ddg_inference.WriteDDGs(cupsat_ddgs, cupsatfile, datetime)\n print \"Completed download of CUPSAT ddG values, stored in the file %s.\" % cupsatfile\n rescaled_cupsat_ddgs = pips.ddg_inference.RescaleDDGs(cupsat_ddgs, 10.0, '10TH_TO_90TH', recenter=5.0, min_max=(-3.0, 13.0)) \n\n # Read the literature-culled ddG data from ddgdatafile and store in the dictionary ddg_data\n print \"\\nReading the literature-culled ddG data from %s...\" % ddgdatafile\n sys.stdout.flush()\n ddgmatch = re.compile(\"^(?P<wt>[A-Y])(?P<r>\\d+)(?P<mut>[A-Y])\\s+(?P<ddg>\\-{0,1}\\d+\\.\\d+)$\")\n ddg_data = {}\n for r in range(len(seq)):\n rdict = {}\n wt = seq[r]\n for aa in pips.ddg_inference.AminoAcids():\n if aa != wt:\n rdict[aa] = []\n ddg_data[r] = (wt, rdict)\n for line in open(ddgdatafile).readlines(): # loop over all lines in ddgdatafile\n if line[0] == '#':\n continue # line is a comment\n m = ddgmatch.search(line.strip()) # match the ddG value\n if not m:\n raise ValueError, \"Cannot read ddG value of %s\" % line\n (wt, r, mut, ddg) = (m.group('wt'), int(m.group('r')), m.group('mut'), float(m.group('ddg')))\n r -= 1 # we decrement r because we are calling the first residue 0\n if seq[r] != wt:\n raise ValueError, 
\"Wildtype residue does not match protein sequence in %s\" % line\n ddg_data[r][1][mut].append(ddg) \n nddgs = 0\n ddgslist = []\n for (r, (wt, rddgs)) in ddg_data.iteritems():\n for mut in rddgs.iterkeys():\n if not rddgs[mut]:\n rddgs[mut] = None # no ddG value\n else:\n nddgs += 1\n ddg0 = rddgs[mut][0]\n allthesame = True\n for ddgi in rddgs[mut][1 : ]: # see if all ddG values are the same for mutation\n if ddgi != ddg0:\n allthesame = False\n if allthesame: # all of the ddG values are the same, take this value\n rddgs[mut] = ddg0\n ddgslist.append(ddg0)\n else: # ddG values differ, print warning and take the average value\n ddg = pips.stats.Mean(rddgs[mut])\n if ddgdatafile_warning:\n print \"WARNING: Mutation %s%d%s has multiple ddG values of\" % (wt, r + 1, mut),\n for ddgi in rddgs[mut]:\n print \"%.2f\" % ddgi,\n print \"--- taking the average value of %.2f.\" % ddg\n sys.stdout.flush()\n rddgs[mut] = ddg\n ddgslist.append(ddg)\n print \"Read a total of %d different ddG values from %s. The mean value is %.2f, the maximum value is %.2f, and the minimum value is %.2f.\" % (nddgs, ddgdatafile, pips.stats.Mean(ddgslist), max(ddgslist), min(ddgslist))\n\n # Read the aligned sequences (into sequences), give short names for phylip\n sequences = pips.fasta.Read(alignment_file)\n nsequences = len(sequences)\n sequences = [(\"SEQ%d\" % (i + 1), sequences[i][1]) for i in range(nsequences)] # rename \n pips.fasta.Write(sequences, 'renamed_alignment.fasta')\n sequences = pips.align.StripGapsToFirstSequence(sequences) \n print \"\\nThere are %d sequences in the alignment.\" % nsequences\n\n # Construct the phylogenetic tree\n if os.path.isfile(treefile):\n print \"A phylogenetic tree has already been constructed for these sequences, and is being read from %s.\" % treefile\n newick_tree = open(treefile).read()\n else:\n print \"Constructing a phylogenetic tree for these sequences...\"\n sys.stdout.flush()\n pips.phylip.WritePhylipSequenceFile(sequences, phylipsequencefile)\n open(phylipdistancefile, 'w').write(pips.phylip.Protdist(phylipsequencefile, phylip_path))\n newick_tree = pips.phylip.DistanceTree(phylipdistancefile, phylip_path, molecular_clock=True, neighbor_joining=True)\n print \"Finished constructing the phylogenetic tree, writing it to %s.\" % treefile\n sys.stdout.flush()\n open(treefile, 'w').write(newick_tree)\n\n # Perform the pips analysis\n sequences = pips.fasta.UnknownsToGaps(sequences) # replace unknown amino acids with gaps\n random.seed(1) # seed the random number generator to make output predictable\n (datetime, pips_ddgs) = pips.ddg_inference.ReadDDGs(pipsddgsfile)\n\n # Read things in with the new pips\n tree = pips.tree.Tree(newick_tree, tipnames_sequences=sequences) # phylogenetic tree data\n ddgset = pips.ddg_inference.DDGSet(seq, tree, ('TRANSITION_TRANSVERSION_RATIO', 0.5), ('SPECIFIED', pips_ddgs, 0, 0), ('BETA', 3, ('KYTE_DOOLITTLE_HYDROPHOBICITY', 1, 0)), 5.0, underflow=5, runtestcode=False)\n ddgset.MaximizePosterior(nrandomstarts=1, printprogress=True)\n new_pips_ddgs = ddgset.DDGDict()\n pips.ddg_inference.WriteDDGs(new_pips_ddgs, 'new_pips_ddgs.txt', time.asctime())\n\n # Get the consensus ddG\n consensus_ddgs = pips.ddg_inference.ConsensusDDGs(seq, sequences, pseudocounts=1)\n\n sys.exit()\n\n # Perform analysis of correlations, and make pylab plots\n print \"\\nAnalysis of correlations to experimental ddG values...\"\n ddgtypes = ['actual', 'CUPSAT', 'consensus', '\\\\begin{tabular}{c} PIPS with \\\\\\\\ informative prior \\end{tabular}', 
'\\\\begin{tabular}{c} PIPS with \\\\\\\\ regularizing prior \\end{tabular}', '\\\\begin{tabular}{c} PIPS with \\\\\\\\ hydrophobicity prior \\end{tabular}']\n zippedlists = pips.ddg_inference.ZippedDDGLists(ddg_data, cupsat_ddgs, consensus_ddgs, pips_ddgs, pips_ddgs_regularizing, pips_ddgs_hydrophobicity)\n mutations = zippedlists[0]\n nmutations = len(mutations)\n ddgs = dict([(ddgtypes[i], zippedlists[i + 1]) for i in range(len(ddgtypes))])\n pylab.rc('text', usetex=True)\n nplots = len(ddgtypes) - 1 # number of different plots\n invnplots = 1.0 / nplots\n (xscale, yscale) = (2.8, 2.5) # each plot covers a rectangle of this size, in inches\n bottom = 1.06\n (tmargin, bmargin, lmargin, rmargin) = (0.03, 0, 0.22, 0.03)\n fig = pylab.figure(figsize=(xscale * (1 + lmargin + rmargin), 3 * yscale * (1 + tmargin + bmargin) * bottom))\n figaxes = pylab.axes([0, 0, 1, 1])\n figaxes.axison = False\n iplot = 0\n maxticks = 5\n (xmin, xmax) = (int(round(min(ddgs['actual'])) - 1), int(round(max(ddgs['actual'])) + 1))\n xtick = 1\n while (xmax - xmin) / float(xtick) > maxticks:\n xtick += 1\n nxticks = int(math.ceil((xmax - xmin) / float(xtick)))\n xticks = [x for x in range(xmin, xmin + nxticks * xtick + 1, xtick)]\n xticklocator = matplotlib.ticker.FixedLocator(xticks)\n xtickformatter = matplotlib.ticker.FixedFormatter([\"%d\" % x for x in xticks])\n for ddgtype in ddgtypes[1 : ]:\n if ddgtype == ddgtypes[-1]:\n xlabel = 'experimental $\\Delta\\Delta G$ values'\n else:\n xlabel = ''\n (r, p, npoints) = pips.stats.PearsonCorrelation(ddgs['actual'], ddgs[ddgtype])\n axes = pylab.axes([lmargin, 1.0 - invnplots * (1 + iplot + bmargin) / bottom, 1.0 - rmargin - lmargin, invnplots * (1.0 - tmargin - bmargin) / bottom], xlabel=xlabel, ylabel=ddgtype)\n nolabels = matplotlib.ticker.NullFormatter()\n (ymin, ymax) = (int(round(min(ddgs[ddgtype])) - 1), int(round(max(ddgs[ddgtype])) + 1))\n ytick = 1\n while (ymax - ymin) / float(ytick) > maxticks:\n ytick += 1\n nyticks = int(math.ceil((ymax - ymin) / float(ytick)))\n yticks = [y for y in range(ymin, ymin + nyticks * ytick + 1, ytick)]\n yticklocator = matplotlib.ticker.FixedLocator(yticks)\n ytickformatter = matplotlib.ticker.FixedFormatter([\"%d\" % y for y in yticks])\n axes.xaxis.set_major_locator(xticklocator)\n axes.yaxis.set_major_locator(yticklocator)\n axes.yaxis.set_major_formatter(ytickformatter)\n if ddgtype != ddgtypes[-1]:\n axes.xaxis.set_major_formatter(nolabels)\n else:\n axes.xaxis.set_major_formatter(xtickformatter)\n iplot += 1\n pylab.text(0.64, 0.14, '$R^2 = %.2f$' % r**2, transform=axes.transAxes, ha='left', va='top', size=14)\n pylab.scatter(ddgs['actual'], ddgs[ddgtype], figure=fig, axes=axes)\n pylab.savefig(\"%s_vertical_plot.eps\" % protein_w_underscores)\n\n pylab.show()",
"def ParserPDB(a):\n\tcontenu=list()\n\tmon_fichier=open(a,\"r\")\n\tfor line in mon_fichier.readlines():\n\t\tcontenu.append(line.strip()) #met le contenu du fichier pdb dans la liste \"contenu\"\n\n\tacidea=dict()\n\t\n\n\n\tfor chain in range(len(contenu)): #On parcourt cette liste contenant tout le fichier pdb\n\t\tif contenu[chain][0:5]==\"MODEL\":\n\t\t\tnewProt = contenu[chain][7:14]\n\t\t\t\n\t\t\tif newProt not in acidea.keys():\n\t\t\t\tacidea[newProt]={}\n\t\t\t\t\n\t\tif contenu[chain][0:4]==\"ATOM\": #Si la ligne commence par \"ATOM\" \n\t\t\tChaine = contenu[chain][21]\n\t\t\t\n\t\t\tif Chaine not in acidea[newProt].keys(): #Si la chaine ( A, B ... ) existe pas deja \n\t\t\t\tacidea[newProt][Chaine] = {} #creation du dictionnaire qui a pour nom les caractères a la ligne 21 ( Chaine)\n\t\t\t\n\t\t\tPosi = contenu[chain][24:26]\n\t\t\tif Posi not in acidea[newProt][Chaine].keys(): #Si la position pour une chaine n'existe pas deja (ex : -3 dans la chaine A)\n\t\t\t\tacidea[newProt][Chaine][Posi]={} # creation du dictionnaire poisition dans le dictionnaire chaine \n\t\t\t\n\t\t\tresidu = contenu[chain][12:16]\n\t\t\tif residu not in acidea[newProt][Chaine][Posi].keys(): #si le residu n'existe pas deja pour une chaine et une position donnée (ex : un CO de la chaine A a la position -3)\n\t\t\t\tacidea[newProt][Chaine][Posi][residu]= {} #Creation du dictionnaire nom de l'atome, contenu dans le dictionnaire position lui meme contenu dans le dictionnaire chaine\t\n\t\t\t\n\t\t\t#repartition de l'information dans le dictionnaire.\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"x\"] = float(contenu[chain][32:38]) #Mise des information de X dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"y\"] = float(contenu[chain][40:46]) #Mise des information de Y dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"z\"] = float(contenu[chain][48:54]) #Meme chose pour Z\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"Id\"] = contenu[chain][9:11] #Meme chose pour Identifiant\n\n\treturn( acidea)",
"def link_protein(self, protein):\n if self.protein is None:\n self.protein = protein\n protein.link_gene(self)",
"def try_protein(self):\n location = [0, 0]\n fold = 0\n \n # loop over aminoacids of the data and add info to aminoacids object\n for i, char in enumerate(self.data):\n aminoacid_number = i\n aminoacid_type = char \n \n # make aminoacid object and add to aminoacids list\n aminoacid = Aminoacid(aminoacid_type, aminoacid_number, location, fold)\n self.occupied.append(aminoacid.location)\n self.aminoacids.append(aminoacid)\n\n # make a line orientation as default\n location = [0, len(self.data) + i]\n return",
"def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.add_option(\"-t\", dest=\"transpose\", help=\"transpose\", action=\"store_true\")\n\tparser.add_option(\"-n\", dest=\"number\", help=\"number\", action=\"store_true\")\n\tparser.add_option(\"-r\", dest=\"range\", help=\"range\")\n\tparser.add_option(\"-s\", dest=\"selection\", help=\"selection\")\n\tparser.set_description(main.__doc__)\n\t(options,args) = parser.parse_args()\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\tpdbfiles = files_from_list(options.pdblist)\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\t\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\t\n\tif options.selection:\n\t\tsele = Selection()\n\t\tsele.makeSelection(options.selection)\n\n\tseq_min = 1\n\tseq_max = 1\n\tif options.range:\n\t\t(min,max) = string.split(arg, \"-\")\n\t\tseq_min = int(min)\n\t\tseq_max = int(max)\n\n\tprotein = Molecule()\n\tSeq = \"\"\n\tfor pdb in pdbfiles:\n\t\tprotein.readPDB(pdb)\n\t\tif options.selection:\n\t\t\tnewmol = sele.apply_selection(protein)\n\t\t\tSeq = newmol.sequence()\n\t\telse:\n\t\t\tSeq = protein.sequence()\n\n\t\tif options.range:\n\t\t\tSeq = Seq[seq_min:seq_max]\n\n\t\tif options.transpose:\n\t\t\tfor i in range(len(Seq)):\n\t\t\t\tprint Seq[i]\n\t\telse:\n\t\t\tprint Seq\n\n\t\tprotein.clear()",
"def placePeptide(sequence, phiPsis, model=\"scratch\", position=None,\n\t\t\t\t\t\trotlib=None, chainID='A'):\n\n\tif not sequence:\n\t\traise ValueError(\"No sequence supplied\")\n\tsequence = sequence.upper()\n\tif not sequence.isupper():\n\t\traise ValueError(\"Sequence contains non-alphabetic characters\")\n\tfrom chimera.resCode import protein1to3\n\tfor c in sequence:\n\t\tif c not in protein1to3:\n\t\t\traise ValueError(\"Unrecognized protein 1-letter code:\"\n\t\t\t\t\t\t\t\t\" %s\" % c)\n\tif len(sequence) != len(phiPsis):\n\t\traise ValueError(\"Number of phi/psis not equal to\"\n\t\t\t\t\t\t\t\" sequence length\")\n\tif isinstance(model, basestring):\n\t\tmodel = _newModel(model)\n\tneedFocus = False\n\tif position is None:\n\t\tif len(chimera.openModels.list()) == 1:\n\t\t\tneedFocus = True\n\t\txf = model.openState.xform\n\t\tposition = xf.inverse().apply(\n\t\t\t\tPoint(*chimera.viewer.camera.center))\n\tprev = [None] * 3\n\tpos = 1\n\tfrom Midas.addAA import DIST_N_C, DIST_CA_N, DIST_C_CA, DIST_C_O\n\tfrom chimera.molEdit import findPt, addAtom, addDihedralAtom\n\tserialNumber = None\n\tresidues = []\n\tfor c, phiPsi in zip(sequence, phiPsis):\n\t\tphi, psi = phiPsi\n\t\twhile model.findResidue(chimera.MolResId(chainID, pos)):\n\t\t\tpos += 1\n\t\tr = model.newResidue(protein1to3[c], chainID, pos, ' ')\n\t\tresidues.append(r)\n\t\tfor backbone, dist, angle, dihed in (\n\t\t\t\t('N', DIST_N_C, 116.6, psi),\n\t\t\t\t('CA', DIST_CA_N, 121.9, 180.0),\n\t\t\t\t('C', DIST_C_CA, 110.1, phi)):\n\t\t\tif prev[0] == None:\n\t\t\t\tpt = Point(0.0, 0.0, 0.0)\n\t\t\telif prev[1] == None:\n\t\t\t\tpt = Point(dist, 0.0, 0.0)\n\t\t\telif prev[2] == None:\n\t\t\t\tpt = findPt(prev[0].coord(), prev[1].coord(),\n\t\t\t\t\tPoint(0.0, 1.0, 0.0), dist, angle, 0.0)\n\t\t\telse:\n\t\t\t\tpt = findPt(prev[0].coord(), prev[1].coord(),\n\t\t\t\t\tprev[2].coord(), dist, angle, dihed)\n\t\t\ta = addAtom(backbone, Element(backbone[0]), r, pt,\n\t\t\t\tserialNumber=serialNumber, bondedTo=prev[0])\n\t\t\tserialNumber = a.serialNumber + 1\n\t\t\tprev = [a] + prev[:2]\n\t\to = addDihedralAtom(\"O\", Element(\"O\"), prev[0], prev[1],\n\t\t\tprev[2], DIST_C_O, 120.4, 180.0 + psi, bonded=True)\n\t# C terminus O/OXT at different angle than mainchain O\n\tmodel.deleteAtom(o)\n\taddDihedralAtom(\"O\", Element(\"O\"), prev[0], prev[1],\n\t\t\tprev[2], DIST_C_O, 117.0, 180.0 + psi, bonded=True)\n\taddDihedralAtom(\"OXT\", Element(\"O\"), prev[0], prev[1], prev[2],\n\t\t\t\t\tDIST_C_O, 117.0, psi, bonded=True)\n\tfrom Rotamers import useBestRotamers\n\t# have to process one by one, otherwise side-chain clashes will occur\n\tkw = {}\n\tif rotlib:\n\t\tkw['lib'] = rotlib\n\tfor r in residues:\n\t\tuseBestRotamers(\"same\", [r], criteria=\"cp\", log=False, **kw)\n\t\t\t\t\n\t# find peptide center\n\tcoords = []\n\tfor r in residues:\n\t\tcoords.extend([a.coord() for a in r.atoms])\n\tcenter = Point(coords)\n\tcorrection = position - center\n\tfor r in residues:\n\t\tfor a in r.atoms:\n\t\t\ta.setCoord(a.coord() + correction)\n\tfrom Midas import ksdssp\n\tksdssp([model])\n\tif needFocus:\n\t\tchimera.runCommand(\"focus\")\n\treturn residues",
"def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.add_option(\"-g\", dest=\"grid\", help=\"grid\")\n\tparser.add_option(\"-o\", dest=\"outfile\", help=\"outfile\")\n\tparser.add_option(\"-s\", dest=\"selection\", help=\"selection\")\n\tparser.set_description(main.__doc__)\n\t(options, args) = parser.parse_args()\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\ttry:\n\t\t\tLIST = open(options.pdblist, 'r')\n\t\texcept:\n\t\t\tprint \"unable to open pdblist\"\n\t\t\tsys.exit()\n\n\t\tfor line in LIST.readlines():\n\t\t\tline = string.rstrip(line)\n\t\t\tpdbfiles.append(line)\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif not options.grid or not options.outfile:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\ttry:\n\t\tOUTPUT = open(options.outfile, 'w')\n\texcept:\n\t\tprint \"unable to create outfile\"\n\t\tsys.exit()\n\n\n\tif options.selection:\n\t\tselection = Selection()\n\t\tselection.makeSelection(options.selection)\n\n\tprotein = Molecule()\t\t\n\n\tmygrid = grid()\t\t\n\tmygrid.read(options.grid)\n\n\tfor pdbfile in pdbfiles:\n\t\tprotein.readPDB(pdbfile)\n\t\tif options.selection:\n\t\t\tnewmol = selection.apply_selection(protein)\n\t\telse:\n\t\t\tnewmol = protein.clone()\n\n\n\t\tatomlist = atomsInGrid(mygrid, newmol)\n\t\tOUTPUT.write(pdbfile + \": \" + str(len(atomlist)) + \"\\n\")\n\t\tprint pdbfile,len(atomlist)\n\n\t\tprotein.clear()\n\t\tnewmol.clear()\n\n\tOUTPUT.close()",
"def dna_to_protein(dna: str, start: int=0):\n return rna_to_protein(dna_to_rna(dna, start), start=start)",
"def parse_pdb(self, line):\n if line is not None:\n self.original_text.append(line.rstrip(\"\\r\\n\"))",
"def write_protein_fasta(args, clusters=None, fasta_dir=None):\n row, concat_fasta_path, frags = args\n dotpath = row[\"path\"]\n phylogeny_dict = {\"prot.idx\": row.name, \"path\": dotpath}\n for phy_prop in [name for name in row.index if name.startswith(\"phy.\")]:\n phylogeny_dict[phy_prop] = row[phy_prop]\n inpath = dotpath_to_path(dotpath)\n prot_info = read_tsv_or_parquet(inpath / PROTEINS_FILE)\n prot_info[\"frag.idx\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.idx\"]\n )\n prot_info[\"frag.is_plas\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.is_plas\"]\n )\n prot_info[\"frag.is_scaf\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.is_scaf\"]\n )\n prot_info[\"frag.is_chr\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.is_chr\"]\n )\n prot_info[\"frag.id\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.id\"]\n )\n # Write out updated protein info\n write_tsv_or_parquet(prot_info, inpath / HOMOLOGY_FILE)\n # include phylogeny info in per-sequence info\n for prop in phylogeny_dict:\n prot_info[prop] = phylogeny_dict[prop]\n # write concatenated sequence info\n if clusters is None:\n fasta_path = concat_fasta_path\n info_to_fasta(None, fasta_path, append=True, infoobj=prot_info)\n else:\n for cluster_id, subframe in clusters.groupby(by=[\"cluster_id\"]):\n cluster_info = prot_info[prot_info.index.isin(subframe[\"members\"])]\n fasta_path = fasta_dir / f\"{cluster_id}.fa\"\n info_to_fasta(None, fasta_path, append=True, infoobj=cluster_info)",
"def create_protein(self):\n while self.try_protein() == False:\n self.clear_protein()",
"def make_pdb(self, pdb_path, out_path, chain_letters, overwrite=False):\r\n chain_letters = [chain.upper() for chain in chain_letters]\r\n pdb_fn = os.path.split(pdb_path)[1]\r\n \r\n print \"OUT PATH:\",out_path\r\n\r\n # Skip PDB generation if the file already exists\r\n plural = \"s\" if (len(chain_letters) > 1) else \"\" # for printing\r\n if (not overwrite) and (os.path.isfile(out_path)):\r\n print(\"Chain%s %s of '%s' already extracted to '%s'.\" %\r\n (plural, \", \".join(chain_letters), pdb_fn, out_path))\r\n return out_path\r\n print(\"Extracting chain%s %s from %s...\" % (plural, \", \".join(chain_letters), pdb_fn))\r\n\r\n # Get structure, write new file with only given chains\r\n struct = self.parser.get_structure('protein', pdb_path)\r\n self.writer.set_structure(struct)\r\n self.writer.save(out_path, select=SelectChains(chain_letters))\r\n\r\n return out_path",
"def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.set_description(main.__doc__)\n\n\n\t(options, args) = parser.parse_args()\n\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\tpdbfiles = files_from_list(options.pdblist)\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\t\t\n\tprotein = Enzyme()\n\tfor file in pdbfiles:\n\t\tprotein.readPDB(file)\n\t\tlig = protein.ligand\n\t\tif lig == None:\n\t\t\tprint \"no ligand found for file:\",file\n\t\t\tsys.exit()\n\n\t\ttot = lig.Erep + lig.Eatr + lig.EhbSC\n\t\tprint file,lig.Erep,lig.Eatr,lig.EhbSC,tot\n\t\tprotein.clear()",
"def addProvenance(self, provenance_on=True):\n self.kwargs['additionalInfo'] = provenance_on",
"def add_code(self, code):\n self.code += code",
"def generate_rbpdb_protein_to_experiment_id():\n rbpdb_protein_experiment_file_path = \\\n \"./website/data/RBPDB_v1.3.1_protExp_human_2012-11-21.tdt\"\n protein_id_to_experimental_ids_dict = {}\n with open(rbpdb_protein_experiment_file_path) as handle:\n line = handle.readline()\n while line:\n columns = line.split(\"\\t\")\n # Here we expect the columns to be:\n # protein_id, experiment_id, homolog, unique_id\n protein_id = columns[0]\n experimental_id = columns[1]\n protein_id_to_experimental_ids_dict[protein_id] = (\n protein_id_to_experimental_ids_dict.get(protein_id, [])\n + [experimental_id]\n )\n line = handle.readline()\n return protein_id_to_experimental_ids_dict",
"def pdbin(self, pdbin):\n self._pdbin = pdbin",
"def proteinTranslation(seq, geneticCode = STANDARD_GENETIC_CODE):\n\n seq = seq.replace('T','U') # Make sure we have RNA sequence\n proteinSeq = []\n \n i = 0\n while i+2 < len(seq):\n \n codon = seq[i:i+3]\n aminoAcid = geneticCode[codon]\n \n if aminoAcid is None: # Found stop codon\n break\n\n proteinSeq.append(aminoAcid)\n i += 3\n\n return proteinSeq",
"def _insert_billcode(self):\n # Insert\n if db_billcode.idx_billcode_exists(1) is False:\n record = Billcode(\n code=general.encode(self.reserved),\n name=general.encode(self.reserved))\n database = db.Database()\n database.add(record, 1104)",
"def main():\r\n\r\n # contents = ['ATGGCCATGGCCCCCAGAACTGAGATCAATAGTACCCGTATTAACGGGTGA', 'MA'] # sample input\r\n contents = []\r\n for line in sys.stdin:\r\n contents.append(line.strip())\r\n myPeptide = GenomeEncoding(contents[0], contents[1])\r\n myPeptide.getCodonSeqs()\r\n myPeptide.getRevCodonSeqs()\r\n myPeptide.printEncodePep()",
"def preparePDB(pdbfile, start=3):\n\n print pdbfile\n parser = PDB.PDBParser(QUIET=True)\n structure = parser.get_structure(pdbfile, pdbfile)\n model = structure[0]\n remove = string.uppercase[3:]\n for chain in model.child_list[:]:\n if chain.id in remove:\n model.detach_child(chain.id)\n else:\n for residue in chain.child_list[:]:\n #remove all non-standard residues\n if residue.id[0] != \" \":\n chain.detach_child(residue.id)\n\n removeresidues = [('A', range(86,200)), ('B', range(96,200))]\n for c in removeresidues:\n chain = model[c[0]]\n remove = c[1]\n for residue in chain.child_list[:]:\n id = residue.id\n if id[1] in remove:\n chain.detach_child(id)\n #renumber chain A\n chaina = model['A']\n renumberChain(chaina)\n #renumber chain B\n chainb = model['B']\n i = chaina.child_list[-1].id[1] + 1\n for residue in chainb:\n residue.id = (' ', i, ' ')\n i+=1\n chainb.id = 'A'\n #model.detach_child('B')\n #print '%s chains, length chain a=%s' %(len(model.child_list), len(chaina.child_list))\n #renumber chain c\n renumberChain(model['C'])\n w = PDB.PDBIO()\n w.set_structure(structure)\n name = os.path.splitext(pdbfile)[0]\n filename = name+'_alt.pdb'\n w.save(filename, write_end=1)\n #print 'saved file %s' %filename\n\n #save unbound\n model.detach_child('C')\n filename2 = name+'_unbound.pdb'\n w.save(filename2, write_end=1)\n return filename, filename2",
"def add_code(self, id, code):\n self.codes[id] = code",
"def test_pdbinfo(pose):\n for i in range(1, pose.total_residue() +1):\n print repr(i)\n print pose.all_residues[i].id\n print pose.pdb_info.pose2pdb(i)"
] | [
"0.64399016",
"0.5670033",
"0.54493546",
"0.54008293",
"0.5362843",
"0.5350154",
"0.53209007",
"0.5317977",
"0.53175825",
"0.5303215",
"0.52984405",
"0.5288228",
"0.52671784",
"0.5254593",
"0.522837",
"0.5211319",
"0.5210247",
"0.5205723",
"0.5200957",
"0.5194791",
"0.5192633",
"0.51586485",
"0.514234",
"0.51378363",
"0.5134083",
"0.5133948",
"0.5133817",
"0.51323587",
"0.51208884",
"0.51007944"
] | 0.6883254 | 0 |
Refresh the users for a given project by deleting all of them. Redundant if using iSpyB. | def delete_users(project):
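    # Remove every user currently linked to the project through the user_id relation.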
for user_id in project.user_id.all():
project.user_id.remove(user_id.pk)
project.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_all_users(request):\n id_project = request.POST.get(\"project_id\")\n project = UtilsData.get_object_by_type_and_id(\"project\", id_project)\n if request.user.can_delete(project):\n roles = project.affecteds_set.all()\n for role in roles:\n if role.role not in (Affecteds.ROLE.Manager, Affecteds.ROLE.Admin):\n role.role = Affecteds.ROLE.Nill\n role.save()\n return HttpResponse(json.dumps(\"Ok\"),\n content_type=\"application/json\")\n else:\n logger.error(\"user %s try to remove all users to project %d \" % (request.user.username, id_project))\n return HttpResponse(json.dumps(\"error\"),\n content_type=\"application/json\")",
"def reset_all_users():\n for user in User.objects.all():\n user.delete()",
"def test_multiple_users_project_not_delete(self):\n\n project = django_dynamic_fixture.get(Project)\n user1 = django_dynamic_fixture.get(User)\n user2 = django_dynamic_fixture.get(User)\n project.users.add(user1, user2)\n\n project.refresh_from_db()\n assert project.users.all().count() > 1\n # Delete 1 user of the project\n user1.delete()\n\n # The project should still exist and it should have 1 user\n project.refresh_from_db()\n assert project.id\n obj_users = project.users.all()\n assert len(obj_users) == 1\n assert user2 in obj_users",
"def project_refresh_all():\n project_list = Project.objects()\n analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])\n flash('Refresh all successfully!', 'success')\n return redirect(url_for('main.admin_manage'))",
"def delete_all_users(self):\n\n User.query.delete()",
"def delete_all_users():\n\tUser.drop_collection()",
"async def remove_all_projects_for_user(app: web.Application, user_id: int) -> None:\n # recover user's primary_gid\n try:\n project_owner: Dict = await get_user(app=app, user_id=user_id)\n except users_exceptions.UserNotFoundError:\n logger.warning(\n \"Could not recover user data for user '%s', stopping removal of projects!\",\n user_id,\n )\n return\n user_primary_gid: str = str(project_owner[\"primary_gid\"])\n\n # fetch all projects for the user\n user_project_uuids = await app[\n APP_PROJECT_DBAPI\n ].list_all_projects_by_uuid_for_user(user_id=user_id)\n logger.info(\n \"Project uuids, to clean, for user '%s': '%s'\",\n user_id,\n user_project_uuids,\n )\n\n for project_uuid in user_project_uuids:\n logger.debug(\n \"Removing or transfering project '%s'\",\n project_uuid,\n )\n try:\n project: Dict = await get_project_for_user(\n app=app,\n project_uuid=project_uuid,\n user_id=user_id,\n include_templates=True,\n )\n except web.HTTPNotFound:\n logger.warning(\n \"Could not recover project data for project_uuid '%s', skipping...\",\n project_uuid,\n )\n continue\n\n new_project_owner_gid = await get_new_project_owner_gid(\n app=app,\n project_uuid=project_uuid,\n user_id=user_id,\n user_primary_gid=user_primary_gid,\n project=project,\n )\n\n if new_project_owner_gid is None:\n # when no new owner is found just remove the project\n logger.info(\n \"The project can be removed as is not shared with write access with other users\"\n )\n try:\n await delete_project_from_db(app, project_uuid, user_id)\n except ProjectNotFoundError:\n logging.warning(\n \"Project '%s' not found, skipping removal\", project_uuid\n )\n continue\n\n # Try to change the project owner and remove access rights from the current owner\n await replace_current_owner(\n app=app,\n project_uuid=project_uuid,\n user_primary_gid=user_primary_gid,\n new_project_owner_gid=new_project_owner_gid,\n project=project,\n )",
"def tearDown(self):\n User.users_list = []",
"def test_project_get_deleted_upon_user_delete(self):\n\n project = django_dynamic_fixture.get(Project)\n user1 = django_dynamic_fixture.get(User)\n project.users.add(user1)\n\n project.refresh_from_db()\n assert project.users.all().count() == 1\n\n # Delete the user\n user1.delete()\n # The object should not exist\n project = Project.objects.all().filter(id=project.id)\n assert not project.exists()",
"def db_delete_user_data(self):\n util.log(\"Clearing all user data\", util.LogLevel.Info)\n self.db.db_clear_data_user()\n util.log(\"Done\", util.LogLevel.Info)",
"def refresh_user(self):\n if len(self.old_fbids) < 1: return\n fbid = self.old_fbids.pop()\n if not fbid: return # null fbids :\\\n\n # wipe out the old row, we'll insert a new (blank) even if we find nothing\n self.pcur.execute(\"\"\"\n DELETE FROM users WHERE fbid=%s\n \"\"\", (fbid,))\n\n self.seek_user(fbid)\n\n self.pconn.commit()\n info( 'Updated fbid {} from Dynamo'.format(fbid))",
"def delete_proj_user_by_name(self, name):\n conn = pyone.OneServer(\n self.auth_url,\n session=\"{0}:{1}\".format(self.username, self.password)\n )\n userpool = conn.userpool.info(-1,-1,-1)\n for user in userpool.USER:\n if user.get_NAME() == name:\n group = user.get_GROUPS()[0]\n # delete group\n conn.group.delete(group)\n # delete user\n return conn.user.delete(user.get_ID())\n logger.warning(\"Delete user ONE: user does not exist: \", name)",
"def update_users(self):\n user_list = []\n try:\n all_users_list = self.helper.list_all_users()\n users_to_update = []\n for email in all_users_list:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n # Only update the model in the Datastore if one of the fields has\n # changed.\n is_user_cloud_admin = self.helper.is_user_cloud_admin(email)\n can_upload_apps = self.helper.can_upload_apps(email)\n owned_apps = self.helper.get_owned_apps(email)\n dash_layout_settings = self.get_dash_layout_settings(user_info)\n stored_layout_settings = user_info.dash_layout_settings\n if stored_layout_settings:\n dash_change = \\\n (dash_layout_settings.get(\"nav\") != stored_layout_settings.get(\n \"nav\")) or \\\n (dash_layout_settings.get(\"panel\") != stored_layout_settings.get(\n \"panel\"))\n else:\n dash_change = True\n\n if user_info.is_user_cloud_admin != is_user_cloud_admin or \\\n user_info.can_upload_apps != can_upload_apps or \\\n dash_change or \\\n user_info.owned_apps != owned_apps:\n user_info.is_user_cloud_admin = is_user_cloud_admin\n user_info.can_upload_apps = can_upload_apps\n user_info.owned_apps = owned_apps\n user_info.dash_layout_settings = dash_layout_settings\n users_to_update.append(user_info)\n\n # Either way, add the user's info to the list of all user's info.\n user_list.append(user_info)\n else:\n user_info = UserInfo(id=email)\n user_info.is_user_cloud_admin = self.helper.is_user_cloud_admin(email)\n user_info.can_upload_apps = self.helper.can_upload_apps(email)\n user_info.owned_apps = self.helper.get_owned_apps(email)\n user_info.dash_layout_settings = self.get_dash_layout_settings(\n user_info=user_info)\n users_to_update.append(user_info)\n user_list.append(user_info)\n ndb.put_multi(users_to_update)\n return user_list\n except Exception as err:\n logging.exception(err)\n return []",
"def remove_user_from_projects(username):\n projects = request.get_json().get(\"projects\", [])\n return jsonify(\n admin.remove_user_from_projects(\n current_app.scoped_session(), username, projects\n )\n )",
"def remove_obsolete_users(self, date_limit):\n for user in User.objects.filter(last_login__lt=date_limit):\n if not ServiceProvider.objects.filter(admins=user):\n self.output(\"Removing user: \" + user.username)\n if not self.list_only:\n user.delete()",
"def tearDown(self):\n User.user_list = []",
"def project_users(): \r\n project_school = api.projects.get_by_slug('ps') \r\n users_list = []\r\n for member in project_school.members:\r\n users_list.append(member.username + '@miem.hse.ru')\r\n \r\n #replaces some users usernames as they are different in taiga than in google groups \r\n for user in range(len(users_list)):\r\n if (users_list[user] == '[email protected]'):\r\n users_list[user] = '[email protected]'\r\n if (users_list[user] == '[email protected]'):\r\n users_list[user] = '[email protected]'\r\n \r\n return users_list",
"def remove_users(self, *users):\r\n pass",
"def delusers(self, args):\n\n if len(args) < 2:\n print(self.addusers.__doc__)\n return\n\n gname = args[0]\n users = args[1:]\n\n g = sr.group(gname)\n\n if not g.in_db:\n print(\"Group '%s' not found.\" % ( gname ))\n return\n\n not_members = g.user_rm( users )\n g.save()\n\n for uname in not_members:\n print(\"Unable to remove non-member '%s' from '%s'\" % ( gname, uname ))",
"def delete_tokens_for_users(self, user_ids, project_id=None):\n if not CONF.token.revoke_by_id:\n return\n for user_id in user_ids:\n self.delete_tokens_for_user(user_id, project_id=project_id)",
"def list_users(ctx, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n pprint(ctx.obj.groups[project.id].databaseUsers.get().data)",
"def tearDown(self):\n User.objects.all().delete()\n Project.objects.all().delete()",
"def tearDown(self):\n\n self.app = None\n users.clear()",
"def resetUsers():\n global pollResults\n pollResults = dict()\n emitResults()",
"def DeleteUser(self, delusercount, deluser):\n for i in range(delusercount):\n login = string.replace(deluser[i]['Login'], ' ', '')\n action = 'userman -D ' + login\n output = commands.getstatusoutput(action)\n print output\n updatecount, update = self.__sqlData[\"UPDATE AccUser SET ToDo = 0 WHERE Login = '%s'\" % (login)]",
"def run_delusers(self, expanded, unexpanded):\n\t\tif not expanded :\n\t\t\treturn self.errormessage('Needs an userid as the first argument')\n\t\tif not self.HasPerms(self.__context.acl_users, 'Manage users') :\n\t\t\treturn -1\n\t\tstatus = 0\n\t\tusernames = []\n\t\tfor username in unexpanded :\n\t\t\tif username not in self.__context.acl_users.getUserNames() :\n\t\t\t\tstatus = status + self.errormessage(\"User %s doesn't exists\" % username)\n\t\t\telse :\n\t\t\t\tusernames.append(username)\n\n\t\tself.__context.REQUEST.set(\"names\", usernames)\n\t\tself.__context.acl_users.manage_users(\"Delete\", REQUEST=self.__context.REQUEST)\n\t\tif usernames :\n\t\t\tself.htmlmessage('Users %s deleted' % string.join(usernames, \", \"))\n\n\t\t# don't be fucked by Zope's automatic redirection\n\t\tself.__context.REQUEST.RESPONSE.setStatus(200)\n\t\treturn status",
"def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)",
"def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200",
"def delete_from_all(self, user_id):\n self.execute(TABELLE['id_users']['delete'], (user_id,))\n self.execute(TABELLE['users']['delete'], (user_id,))\n self.execute(TABELLE['punteggio']['delete'], (user_id,))\n self.execute(TABELLE['items']['delete'], (user_id,))",
"def tearDown(self):\n User.objects.all().delete()"
] | [
"0.7266119",
"0.6693988",
"0.66053855",
"0.6487059",
"0.63297355",
"0.63088167",
"0.62710536",
"0.61194533",
"0.6093415",
"0.6081486",
"0.6009706",
"0.59416425",
"0.59289557",
"0.5863758",
"0.58465534",
"0.58461785",
"0.58394694",
"0.5836747",
"0.5823315",
"0.58197975",
"0.5808992",
"0.58054006",
"0.5797223",
"0.57850534",
"0.57826346",
"0.57552105",
"0.5752667",
"0.57415634",
"0.5730299",
"0.5719859"
] | 0.7483515 | 0 |
Add proposals and visits as projects for a given target. | def get_create_projects(target, proposal_ref, proposal_code='lb'):
# Note that in the loader this is based on information in the PROPOSALS and VISITS files
# TODO Multiple Visits can be defined in a file apparently - future improvement.
    # TODO NB Line above in delete_users - redundant if using ISPYB??.
# For the online loader it comes from the proposal_ref
projects = []
# The first word is the ISPY proposal/visit name that is used as the title of the project.
# It can be set to OPEN in which case there are no users.
visit = proposal_ref.split()[0]
# If the visit is not prefixed by the proposal code
# (typically a 2-letter sequence like "lb") then prefix it.
if visit[0].isdigit():
visit = f"{proposal_code}{visit}"
project = Project.objects.get_or_create(title=visit)[0]
projects.append(project)
# If not open then delete users for the project and re-add them based on supplied fed-ids.
delete_users(project)
# Update project_id on target.
target.project_id.add(project)
# Remaining words in proposal_ref (if any) must be fedid's which are used to find users information.
num_users = 0
for fedid in proposal_ref.split()[1:]:
user = User.objects.get_or_create(username=fedid, password="")[0]
project.user_id.add(user)
num_users += 1
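    # If no fed-ids were supplied, the project is treated as open to the public.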
if num_users == 0:
project.open_to_public = True
target.upload_progess = 10.00
target.save()
return projects | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_pp(self, arg):\n self.do_projects(arg)",
"def select_proposals(self):\r\n print \"Selecting proposals... \"\r\n global MAX_NUMBER_PROJECTS\r\n proposals_sorted = sorted(self.proposals, key=lambda project:project.likes, reverse=True)\r\n for i in range(MAX_NUMBER_PROJECTS):\r\n self.projects_for_vote.append(proposals_sorted[i])",
"def project():",
"def project():",
"def project():",
"def add_project(project_info):\n project = project_collection.insert_one(project_info)\n user = user_collection.find_one({\"_id\": project_info[\"owner\"]})\n list1 = user[\"owner\"]\n list1.append(project.inserted_id)\n user_collection.find_one_and_update(\n {\"_id\": project_info[\"owner\"]},\n {\n \"$set\": {\n \"owner\": list1,\n }\n },\n upsert=False,\n )\n\n key = search_collection.find_one({\"_id\": SEARCH_ID})\n for skill in project_info[\"projectSkills\"]:\n try:\n value_list = key[skill]\n value_list.append(project.inserted_id)\n search_collection.find_one_and_update(\n {\"_id\": SEARCH_ID}, {\"$set\": {skill: value_list}}, upsert=False\n )\n except AttributeError:\n value_list = list()\n value_list.append(project.inserted_id)\n search_collection.find_one_and_update(\n {\"_id\": SEARCH_ID},\n {\n \"$set\": {\n skill: value_list,\n }\n },\n upsert=False,\n )\n except KeyError:\n value_list = list()\n value_list.append(project.inserted_id)\n search_collection.find_one_and_update(\n {\"_id\": SEARCH_ID},\n {\n \"$set\": {\n skill: value_list,\n }\n },\n upsert=False,\n )",
"def create_random_proposals(self): \r\n global MAX_NUMBER_PROPOSALS\r\n global LOCATIONS\r\n global CATEGORIES\r\n \r\n for i in range(MAX_NUMBER_PROPOSALS):\r\n description = \"\"\r\n location = locations_rv.rvs(size=1)[0]\r\n category = categories_rv.rvs(size=1)[0]\r\n budget = random.uniform(500000, 1000000)\r\n project = Project(i, description, category, budget, location)\r\n self.proposals.append(project)",
"def select_approved_projects(self):\r\n print \"Selecting approved projects... \"\r\n global ANNUAL_BUDGET\r\n \r\n projects_citizens_sorted = sorted(self.projects_for_vote, key=lambda project:project.units, reverse=True)\r\n projects_reps_sorted = sorted(self.projects_for_vote, key=lambda project:project.p_units, reverse=True)\r\n budget_sum = 0\r\n \r\n for p in projects_citizens_sorted:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n budget_sum = 0\r\n for p in projects_reps_sorted:\r\n if p not in self.projects_approved:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n\r\n \r\n# raw_input(\"select_approved_projects - antes\")\r\n for p in projects_citizens_sorted:\r\n print p\r\n print \"\\nReps\\n\"\r\n for p in projects_reps_sorted:\r\n print p\r\n print \"\\nApproved\\n\"\r\n for p in self.projects_approved:\r\n print p\r\n\r\n raw_input(\"select_approved_projects - depois\")",
"def do_p(self, arg):\n self.do_project(arg)",
"def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)",
"def label_and_sample_proposals(\n self, proposals: List[Instances], targets: List[Instances]\n ) -> List[Instances]:\n gt_boxes = [x.gt_boxes for x in targets]\n # Augment proposals with ground-truth boxes.\n # In the case of learned proposals (e.g., RPN), when training starts\n # the proposals will be low quality due to random initialization.\n # It's possible that none of these initial\n # proposals have high enough overlap with the gt objects to be used\n # as positive examples for the second stage components (box head,\n # cls head, mask head). Adding the gt boxes to the set of proposals\n # ensures that the second stage components will have some positive\n # examples from the start of training. For RPN, this augmentation improves\n # convergence and empirically improves box AP on COCO by about 0.5\n # points (under one tested configuration).\n if self.proposal_append_gt:\n proposals = add_ground_truth_to_proposals(gt_boxes, proposals)\n\n proposals_with_gt = []\n\n num_fg_samples = []\n num_bg_samples = []\n for proposals_per_image, targets_per_image in zip(proposals, targets):\n has_gt = len(targets_per_image) > 0\n match_quality_matrix = pairwise_iou(\n targets_per_image.gt_boxes, proposals_per_image.proposal_boxes\n )\n matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)\n sampled_idxs, gt_classes, gt_standards = self._sample_proposals2(\n matched_idxs, matched_labels, targets_per_image.gt_classes, targets_per_image.gt_standards\n )\n # Set target attributes of the sampled proposals:\n proposals_per_image = proposals_per_image[sampled_idxs]\n proposals_per_image.gt_classes = gt_classes\n proposals_per_image.gt_standards = gt_standards\n\n # We index all the attributes of targets that start with \"gt_\"\n # and have not been added to proposals yet (=\"gt_classes\").\n if has_gt:\n sampled_targets = matched_idxs[sampled_idxs]\n # NOTE: here the indexing waste some compute, because heads\n # like masks, keypoints, etc, will filter the proposals again,\n # (by foreground/background, or number of keypoints in the image, etc)\n # so we essentially index the data twice.\n for (trg_name, trg_value) in targets_per_image.get_fields().items():\n if trg_name.startswith(\"gt_\") and not proposals_per_image.has(trg_name):\n proposals_per_image.set(trg_name, trg_value[sampled_targets])\n else:\n # gt_boxes = Boxes(\n # targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))\n # )\n gt_boxes = Boxes(\n torch.zeros(len(sampled_idxs), 4, device=gt_standards.device)\n )\n proposals_per_image.gt_boxes = gt_boxes\n\n num_bg_samples.append((gt_classes == self.num_classes).sum().item())\n num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])\n proposals_with_gt.append(proposals_per_image)\n\n # Log the number of fg/bg samples that are selected for training ROI heads\n storage = get_event_storage()\n storage.put_scalar(\"roi_head/num_fg_samples\", np.mean(num_fg_samples))\n storage.put_scalar(\"roi_head/num_bg_samples\", np.mean(num_bg_samples))\n\n return proposals_with_gt",
"def label_and_sample_proposals_mod(\n\t\tself, proposals: List[Instances], targets: List[Instances]\n\t) -> List[Instances]:\n\t\tgt_boxes = [x.gt_boxes for x in targets]\n\t\t# Augment proposals with ground-truth boxes.\n\t\t# In the case of learned proposals (e.g., RPN), when training starts\n\t\t# the proposals will be low quality due to random initialization.\n\t\t# It's possible that none of these initial\n\t\t# proposals have high enough overlap with the gt objects to be used\n\t\t# as positive examples for the second stage components (box head,\n\t\t# cls head, mask head). Adding the gt boxes to the set of proposals\n\t\t# ensures that the second stage components will have some positive\n\t\t# examples from the start of training. For RPN, this augmentation improves\n\t\t# convergence and empirically improves box AP on COCO by about 0.5\n\t\t# points (under one tested configuration).\n\t\texpansion_scale = 0.05\n\t\tprint(\"proposals before expansion\")\n\t\tprint(proposals)\n\t\tfor i in range(len(proposals)):\n\t\t\th,w = proposals[i].image_size\n\t\t\tprop_boxes = proposals[i].proposal_boxes.tensor\n\t\t\tbw = prop_boxes[:,2] - prop_boxes[:,0]\n\t\t\tbh = prop_boxes[:,3]-prop_boxes[:,1]\n\t\t\tprop_boxes[:,0] = torch.max(prop_boxes[:,0] - (bw*expansion_scale*0.5),torch.zeros_like(prop_boxes[:,0]))\n\t\t\tprop_boxes[:,1] = torch.max(prop_boxes[:,1] - (bh*expansion_scale*0.5),torch.zeros_like(prop_boxes[:,1]))\n\t\t\tprop_boxes[:,2] = torch.min(prop_boxes[:,2] + (bw*expansion_scale*0.5),torch.zeros_like(prop_boxes[:,2]) + w)\n\t\t\tprop_boxes[:,3] = torch.min(prop_boxes[:,3] + (bh*expansion_scale*0.5),torch.zeros_like(prop_boxes[:,3]) + h)\n\t\t\tproposals[i].proposal_boxes = Boxes(prop_boxes)\n\t\tprint(\"proposals after expansion\")\n\t\tprint(proposals)\n\t\tif self.proposal_append_gt:\n\t\t\tproposals = add_ground_truth_to_proposals(gt_boxes, proposals)\n\n\t\tproposals_with_gt = []\n\n\t\tnum_fg_samples = []\n\t\tnum_bg_samples = []\n\t\tfor proposals_per_image, targets_per_image in zip(proposals, targets):\n\t\t\thas_gt = len(targets_per_image) > 0\n\t\t\tmatch_quality_matrix = pairwise_iou(\n\t\t\t\ttargets_per_image.gt_boxes, proposals_per_image.proposal_boxes\n\t\t\t)\n\t\t\tmatched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)\n\t\t\tsampled_idxs, gt_classes = self._sample_proposals_mod(\n\t\t\t\tmatched_idxs, matched_labels, targets_per_image.gt_classes\n\t\t\t)\n\n\t\t\t# Set target attributes of the sampled proposals:\n\t\t\tproposals_per_image = proposals_per_image[sampled_idxs]\n\t\t\tproposals_per_image.gt_classes = gt_classes\n\n\t\t\t# We index all the attributes of targets that start with \"gt_\"\n\t\t\t# and have not been added to proposals yet (=\"gt_classes\").\n\t\t\tif has_gt:\n\t\t\t\tsampled_targets = matched_idxs[sampled_idxs]\n\t\t\t\t# NOTE: here the indexing waste some compute, because heads\n\t\t\t\t# like masks, keypoints, etc, will filter the proposals again,\n\t\t\t\t# (by foreground/background, or number of keypoints in the image, etc)\n\t\t\t\t# so we essentially index the data twice.\n\t\t\t\tfor (trg_name, trg_value) in targets_per_image.get_fields().items():\n\t\t\t\t\tif trg_name.startswith(\"gt_\") and not proposals_per_image.has(trg_name):\n\t\t\t\t\t\tproposals_per_image.set(trg_name, trg_value[sampled_targets])\n\t\t\telse:\n\t\t\t\tgt_boxes = Boxes(\n\t\t\t\t\ttargets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))\n\t\t\t\t)\n\t\t\t\tproposals_per_image.gt_boxes = 
gt_boxes\n\n\t\t\tnum_bg_samples.append((gt_classes == self.num_classes).sum().item())\n\t\t\tnum_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])\n\t\t\tproposals_with_gt.append(proposals_per_image)\n\n\t\t# Log the number of fg/bg samples that are selected for training ROI heads\n\t\tstorage = get_event_storage()\n\t\tstorage.put_scalar(\"roi_head/num_fg_samples\", np.mean(num_fg_samples))\n\t\tstorage.put_scalar(\"roi_head/num_bg_samples\", np.mean(num_bg_samples))\n\n\t\treturn proposals_with_gt",
"def get_projects(self, source=\"all\"):\n self.projects = []\n self._project_indices_by_id = {}\n self._project_indices_by_name = {}\n\n if self.hub_type == self.NAMESPACES[\"a.\"]:\n if not self.auth.three_legged:\n self.logger.warning(\n \"Failed to get projects. '{}' hubs only supports 3-legged access token.\".format( # noqa:E501\n self.NAMESPACES[\"a.\"]\n )\n )\n else:\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n elif self.hub_type == self.NAMESPACES[\"b.\"]:\n\n if source.lower() in (\"all\", \"docs\"):\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n if (\n source.lower() in (\"all\", \"admin\")\n and not self.auth.three_legged\n ):\n\n for project in self.api.hq.get_projects():\n if project[\"id\"] in self._project_indices_by_id:\n self.projects[\n self._project_indices_by_id[project[\"id\"]]\n ].data = project\n else:\n self.projects.append(\n Project(\n project[\"name\"],\n project[\"id\"],\n data=project,\n app=self,\n )\n )\n self._project_indices_by_id[project[\"id\"]] = (\n len(self.projects) - 1\n )\n\n self._project_indices_by_name[project[\"name\"]] = (\n len(self.projects) - 1\n )\n\n elif source.lower() in (\"all\", \"admin\"):\n self.logger.debug(\n \"Failed to get projects. The BIM 360 API only supports 2-legged access tokens\" # noqa:E501\n )",
"def label_and_sample_proposals(\n self, proposals: List[Instances], targets: List[Instances]\n ) -> List[Instances]:\n # Augment proposals with ground-truth boxes.\n # In the case of learned proposals (e.g., RPN), when training starts\n # the proposals will be low quality due to random initialization.\n # It's possible that none of these initial\n # proposals have high enough overlap with the gt objects to be used\n # as positive examples for the second stage components (box head,\n # cls head, mask head). Adding the gt boxes to the set of proposals\n # ensures that the second stage components will have some positive\n # examples from the start of training. For RPN, this augmentation improves\n # convergence and empirically improves box AP on COCO by about 0.5\n # points (under one tested configuration).\n if self.proposal_append_gt:\n proposals = add_ground_truth_to_proposals(targets, proposals)\n\n proposals_with_gt = []\n\n num_fg_samples = []\n num_bg_samples = []\n for proposals_per_image, targets_per_image in zip(proposals, targets):\n has_gt = len(targets_per_image) > 0\n match_quality_matrix = pairwise_iou(\n targets_per_image.gt_boxes, proposals_per_image.proposal_boxes\n )\n matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)\n sampled_idxs, gt_classes = self._sample_proposals(\n matched_idxs, matched_labels, targets_per_image.gt_classes\n )\n\n # Set target attributes of the sampled proposals:\n proposals_per_image = proposals_per_image[sampled_idxs]\n proposals_per_image.gt_classes = gt_classes\n\n if has_gt:\n sampled_targets = matched_idxs[sampled_idxs]\n # We index all the attributes of targets that start with \"gt_\"\n # and have not been added to proposals yet (=\"gt_classes\").\n # NOTE: here the indexing waste some compute, because heads\n # like masks, keypoints, etc, will filter the proposals again,\n # (by foreground/background, or number of keypoints in the image, etc)\n # so we essentially index the data twice.\n for (trg_name, trg_value) in targets_per_image.get_fields().items():\n if trg_name.startswith(\"gt_\") and not proposals_per_image.has(trg_name):\n proposals_per_image.set(trg_name, trg_value[sampled_targets])\n # If no GT is given in the image, we don't know what a dummy gt value can be.\n # Therefore the returned proposals won't have any gt_* fields, except for a\n # gt_classes full of background label.\n\n num_bg_samples.append((gt_classes == self.num_classes).sum().item())\n num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])\n proposals_with_gt.append(proposals_per_image)\n\n # Log the number of fg/bg samples that are selected for training ROI heads\n storage = get_event_storage()\n storage.put_scalar(\"roi_head/num_fg_samples\", np.mean(num_fg_samples))\n storage.put_scalar(\"roi_head/num_bg_samples\", np.mean(num_bg_samples))\n\n return proposals_with_gt",
"def target_add():\r\n try:\r\n target_id = request.post_vars[\"target\"]\r\n group_id = request.post_vars[\"group\"]\r\n except KeyError:\r\n pass\r\n\r\n result = gl.add_to_targetgroup(target_id, group_id)\r\n\r\n if result:\r\n return response.json({'success': 'true'})\r\n\r\n return response.json({'success': 'false'})",
"def projected():\n # pull updated list of donors(class)\n projected_list = create_donors_list()\n print('''Welcome to the Projection Option. Here you can run projections for contributions. \n Help Companies structure their matching donations based on past contribution amounts.\n Simply enter the minumum and maximum donation range that will be matched and see the total contribution:''')\n try:\n minimum_input = float(\n input('Enter a minimum donation amount (0 if none): '))\n maximum_input = float(\n input('Enter a maximum donation amount (0 if none): '))\n factor = float(\n input('Please enter the factor you wish to multiply these donations by >> '))\n except ValueError:\n print('Please follow instructions and enter a number only')\n\n projections = projection(projected_list, factor,\n minimum_input, maximum_input)\n print('\\nProjected contribution value: ${:,.2f}'.format(projections))",
"def add_target(self, target):\n\n # pass specified target parameters to the PED-RPC server.\n target.pedrpc_connect()\n target.set_fuzz_data_logger(fuzz_data_logger=self._fuzz_data_logger)\n\n # add target to internal list.\n self.targets.append(target)",
"def add_marketing_target_to_household(self, household_id, marketing_target):\n root_url = \"/upm/households/{household_id}/locale\".format(household_id=household_id)\n headers = {\"Source-ID\": \"PCT\", \"Source-Type\": \"PCT\"}\n payload = {\"marketingTarget\": str(marketing_target)}\n\n add_marketing_target_to_household_response, http_code = self.request(root_url, headers=headers, json=payload)\n\n return add_marketing_target_to_household_response, http_code",
"def add_project(self, proj, i):\r\n self.__projects[i] = proj",
"def make_project(id):\n return {\n \"type\": \"Project\",\n \"metrics\": [],\n \"tags\": [],\n \"id\": id,\n \"description\": \"\",\n \"applicant\": \"\",\n }",
"def project(projectname,targetamount):\n if (validatename(projectname) and validatenum(targetamount)):\n targetamount=float(targetamount)\n con = lite.connect(databasefile)\n with con:\n cur = con.cursor() \n cur.execute(\"SELECT Id FROM projects where name=?\", (projectname,))\n exists = cur.fetchone()\n if exists:\n click.echo(\"Project name already exists!\")\n sys.exit()\n cur.execute(\"INSERT INTO projects (Name, Tamount) VALUES (?, ?)\", (projectname, targetamount))\n click.echo(\"Added %s project with target of $%-.2f\" % (projectname, targetamount))",
"def project(self, target):\n xp = self.xp\n\n self.data = xp.asarray(self.data, dtype=self.dtype)\n if self.data.ndim == 1:\n self.data = self.data.reshape([2] * self.size)\n\n data = xp.split(self.data, [1], axis=target)\n p = [self._to_scalar(xp.sum(data[i] * xp.conj(data[i])).real) for i in (0, 1)]\n obs = np.random.choice([0, 1], p=p)\n\n if obs == 0:\n self.data = xp.concatenate((data[obs] / math.sqrt(p[obs]), xp.zeros_like(data[obs])), target)\n else:\n self.data = xp.concatenate((xp.zeros_like(data[obs]), data[obs] / math.sqrt(p[obs])), target)\n return obs",
"def qck_gen_proj(self, master):\r\n if not self._check_project_name():\r\n return\r\n\r\n # Clear out driver list and board\r\n self.newProj.board = ()\r\n self.newProj.drvList = []\r\n\r\n # Configure ksdkProj given GUI state\r\n self.localSDK.get_version()\r\n self.newProj.name = self.widgetList[4].get()\r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n self.newProj.useBSP = not self.localSDK.isNewVersion()\r\n\r\n # Add the board\r\n try:\r\n userBoard = int(self.widgetList[6].curselection()[0]) + 1\r\n self.newProj.add_board(userBoard, self.localSDK.brdList)\r\n except IndexError:\r\n tkMessageBox.showinfo(\"No board selected!\",\\\r\n \"Make sure a board has been selected.\")\r\n return\r\n\r\n self.widgetList[10].step(30)\r\n self.widgetList[10].update_idletasks()\r\n\r\n # Quick check to see if this poject already exists\r\n checkPath = self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath(self.newProj.board[1]) + '/' + self.newProj.name\r\n if os.path.isdir(checkPath):\r\n tkMessageBox.showinfo(\"Project exists\",\\\r\n \"A project by this name already exists.\")\r\n return\r\n\r\n # in quick mode there is always generated the board project\r\n self.newProj.isBoardProject = True\r\n \r\n # Add all drivers for this device\r\n self.localSDK.get_drivers()\r\n maskRet = kT.mask_features(kTool.KsdkTools(), self.newProj.sdkPath, self.newProj.sdkVer, \\\r\n self.localSDK.drvList, self.newProj.device[1], self.newProj.device[2])\r\n self.newProj.portCount = maskRet[0]\r\n self.newProj.dmaCount = maskRet[1]\r\n self.newProj.tsiVersion = maskRet[2]\r\n self.newProj.add_all_drv(self.localSDK.drvList)\r\n\r\n kT.debug_log('Port Count: ' + str(self.newProj.portCount))\r\n\r\n #Generate IAR project files\r\n #self.newProj.fast_build_IAR()\r\n self.newProj.workSpace = self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath(self.newProj.board[1]) + '/'\r\n projectPath = self.newProj.workSpace + self.newProj.name\r\n\r\n #Get all include paths lists into one list\r\n includeList = []\r\n index = 0\r\n isPresent = False\r\n while index < len(self.newProj.drvList):\r\n count = 0\r\n while count < len(self.newProj.drvList[index][2]):\r\n isPresent = False\r\n newPath = str(\\\r\n self.newProj.drvList[index][2][count]\\\r\n )\r\n if len(includeList) > 0:\r\n listIndex = 0\r\n while listIndex < len(includeList):\r\n if newPath == includeList[int(listIndex) - 1]:\r\n isPresent = True\r\n listIndex += 1\r\n if not isPresent:\r\n includeList.append(newPath)\r\n count += 1\r\n index += 1\r\n\r\n self.newProj.libList.append('platform')\r\n if not os.path.isdir(projectPath):\r\n os.makedirs(projectPath)\r\n self.newProj.rtos = 'bm'\r\n\r\n if not os.path.isfile(projectPath + '/main.c'):\r\n self.newProj.make_main_file(projectPath, includeList)\r\n if not os.path.isfile(projectPath + '/hardware_init.c'):\r\n self.newProj.make_hw_file(projectPath)\r\n\r\n self.widgetList[10].step(30)\r\n self.widgetList[10].update_idletasks()\r\n\r\n ## Copy over BSP files\r\n if self.newProj.useBSP:\r\n if not os.path.isdir(projectPath + '/board'):\r\n os.mkdir(projectPath + '/board')\r\n bspDir = self.newProj.sdkPath + '/examples/' + self.newProj.board[1]\r\n bspList = kT.list_files(bspDir)\r\n for f in bspList:\r\n if f[-2:] == '.c':\r\n shutil.copyfile(bspDir + '/' + f, projectPath + '/board/' + f)\r\n if f[-2:] == '.h':\r\n shutil.copyfile(bspDir + '/' + f, projectPath + 
'/board/' + f)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.IARname, self.newProj.device):\r\n print self.newProj.isLinked\r\n if self.localSDK.isNewVersion():\r\n newIar = kIarNew.KsdkIarNew(self.newProj)\r\n else:\r\n newIar = kIar.KsdkIar(self.newProj)\r\n newIar.gen_ewp(self.newProj)\r\n newIar.gen_eww(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.KeilMDK, self.newProj.device):\r\n #Generate MDK project files\r\n if self.localSDK.isNewVersion():\r\n newMdk = kMdkNew.KsdkMdkNew(self.newProj)\r\n else:\r\n newMdk = kMdk.KsdkMdk(self.newProj)\r\n newMdk.gen_proj(self.newProj)\r\n newMdk.gen_wkspace(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.KinetisDesignStudio, self.newProj.device):\r\n #Generate KDS project fiels\r\n print self.newProj.isLinked\r\n if self.localSDK.isNewVersion():\r\n newKds = kKdsNew.KsdkKdsNew(self.newProj)\r\n else:\r\n newKds = kKds.KsdkKds(self.newProj)\r\n\r\n newKds.gen_cproject(self.newProj)\r\n newKds.gen_project(self.newProj)\r\n newKds.gen_working_set(self.newProj)\r\n newKds.gen_debug(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.AtollicStudio, self.newProj.device):\r\n #Generate ATL project files\r\n if self.localSDK.isNewVersion():\r\n newAtl = kAtlNew.KsdkAtlNew(self.newProj)\r\n else:\r\n newAtl = kAtl.KsdkAtl(self.newProj)\r\n newAtl.gen_cproject(self.newProj)\r\n newAtl.gen_project(self.newProj)\r\n newAtl.gen_debug(self.newProj)\r\n newAtl.gen_settings(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.ARMgcc):\r\n #Generate GCC project files\r\n if not self.newProj.fast_build_GCC():\r\n tkMessageBox.showinfo(\"Missing CMake Files\",\\\r\n \"CMake files are missing from your KSDK installation.\")\r\n\r\n #Text for window\r\n genString = 'Your project was created in the following location:\\n'\r\n pathString = ''\r\n pathString += self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath( self.newProj.board[1]) + '/' + self.newProj.name + '/'\r\n genString += pathString\r\n genString += '\\nPress the button below to open project location folder.'\r\n\r\n #Create window to show USER that project has been generated and where it is.\r\n popGen = Toplevel()\r\n if self.newProj.osType == 'Windows':\r\n winH = 100 * WIN_SCALE\r\n winW = 600 * WIN_SCALE\r\n elif self.newProj.osType == 'Darwin':\r\n if platform.mac_ver()[0][:5] == '10.10':\r\n winH = 100\r\n winW = 600\r\n elif platform.mac_ver()[0][:5] == '10.11':\r\n winH = 100\r\n winW = 660\r\n else:\r\n winH = 100\r\n winW = 600\r\n popGen.config(height=winH, width=winW)\r\n popGen.grid()\r\n if self.newProj.osType == 'Linux':\r\n img = Image(\"photo\", data=kImg.boardImages['kds_icon.gif']) # Use the .gif in Linux\r\n popGen.tk.call('wm', 'iconphoto', popGen._w, img)\r\n popGen.title(\"Project created\")\r\n popGen.geometry('%dx%d+%d+%d' % (winW, winH, master.winfo_x() + 20, master.winfo_y() + 20))\r\n popGen.resizable(width=FALSE, height=FALSE)\r\n popGen.configure(background='#E7E7E7')\r\n\r\n genTxt = Label(popGen, text=genString, justify=LEFT)\r\n genTxt.grid(row=0, column=0, columnspan=2, padx=5, pady=5)\r\n\r\n #Create button to open project folder\r\n ## IF we are in windows, we need to replace all '/' with '\\\\'\r\n tempString = pathString[:]\r\n if self.newProj.osType == 'Windows':\r\n pathString = ''\r\n pathString = kT.string_replace(tempString, '/', '\\\\')\r\n\r\n genButton = 
Button(popGen, text='Open Project Folder', command=lambda: self.view_project(pathString, popGen))\r\n genButton.grid(row=2, column=0, sticky=W, padx=5, pady=5)\r\n\r\n self.widgetList[10].step(35)\r\n self.widgetList[10].update_idletasks()\r\n\r\n # patch to implement automation test\r\n self.pop_gen = popGen\r\n\r\n return",
"def test_add_project(self):\n pass",
"def add_project(self, name, branches):\n prj_e = self._doc.createElement('project')\n prj_e.setAttribute('name', name)\n for branch in branches:\n br_e = self._doc.createElement('branch')\n for key, val in branch.iteritems():\n br_e.setAttribute(key, val)\n prj_e.appendChild(br_e)\n self._doc.firstChild.appendChild(prj_e)",
"def _on_click_browse_to_pt_project(self):\n pass",
"def project(self, value):\n\n if self._project != value:\n self._project = value\n self._update_page()",
"def set_up_as_target(cls, projects):\n source_site = cls.make_site(\n name='Test Source',\n url='http://0.0.0.0',\n mode=SITE_MODE_SOURCE,\n description='',\n secret=build_secret(),\n )\n remote_projects = []\n for project in projects:\n remote_projects.append(\n cls.make_remote_project(\n project_uuid=project.sodar_uuid,\n project=project,\n site=source_site,\n level=SODAR_CONSTANTS['REMOTE_LEVEL_READ_ROLES'],\n )\n )\n return source_site, remote_projects",
"def add_target(self, target):\r\n def add_targets(tgt):\r\n self._targets.update(tgt for tgt in tgt.resolve() if isinstance(tgt, self._target_base))\r\n target.walk(add_targets)",
"def project(self, v):\n return v"
] | [
"0.5410881",
"0.5407288",
"0.52270865",
"0.52270865",
"0.52270865",
"0.5210586",
"0.5194947",
"0.50674295",
"0.505153",
"0.50186455",
"0.49581373",
"0.48688206",
"0.48253044",
"0.48119506",
"0.4789279",
"0.47891718",
"0.47839138",
"0.4761073",
"0.4760999",
"0.4760814",
"0.47577775",
"0.4757296",
"0.47382718",
"0.47309548",
"0.47081518",
"0.4700594",
"0.4695618",
"0.468696",
"0.46748653",
"0.4665205"
] | 0.6467684 | 0 |
Get the vectors for a given molecule | def get_vectors(mols):
vect_types = VectTypes()
for mol in mols:
if "." in mol.smiles:
logger.debug("SKIPPING - FRAGMENT: %s", mol.smiles)
continue
vectors = get_3d_vects_for_mol(mol.sdf_info)
for vect_type in vectors:
vect_choice = vect_types.translate_vect_types(vect_type)
for vector in vectors[vect_type]:
                # Vector names are encoded as "smiles__index"; the index part is optional.
                spl_vect = vector.split("__")
smiles = spl_vect[0]
if len(spl_vect) > 1:
vect_ind = int(spl_vect[1])
else:
vect_ind = 0
new_vect = Vector.objects.get_or_create(
smiles=smiles, cmpd_id=mol.cmpd_id, type=vect_choice
)[0]
create_vect_3d(mol, new_vect, vect_ind, vectors[vect_type][vector]) | {
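
The vector names produced by get_3d_vects_for_mol are consumed above as "smiles__index" strings. A minimal, self-contained sketch of that parsing step; the helper name and example keys are illustrative only, not part of the source:

def split_vector_key(vector_key):
    # "smiles__index" -> (smiles, index); the index is optional and defaults to 0
    parts = vector_key.split("__")
    smiles = parts[0]
    vect_ind = int(parts[1]) if len(parts) > 1 else 0
    return smiles, vect_ind

print(split_vector_key("OC(=O)c1ccccc1__2"))  # ('OC(=O)c1ccccc1', 2)
print(split_vector_key("c1ccccc1"))           # ('c1ccccc1', 0)
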
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vectors(self):\n return self.vecs[:]",
"def vector(molec, dihed, nonH, energy):\n #Torison\n if dihed:\n pass\n #XYZ\n else:\n coords = ()\n if nonH:\n for atom in molec.atoms:\n coords += atom.coords\n else:\n for atom in molec.atoms:\n if atom.atomicnum > 1:\n coords += atom.coords\n #Energy\n if energy:\n coords += (molec.energy/10.0,)\n return coords",
"def getVectors(self,graph):\n return [Vector.createFromTwoTuples(graph[i],graph[i+1]) for i in range(len(graph)-1)]",
"def get_vectors(dim, R2):\n\n #collecting base vectors\n base_vecs = []\n numbers = get_set(dim, R2)\n while len(numbers) >= dim:\n vec = get_base_vector(dim, R2, deepcopy(numbers))\n if vec is not False:\n base_vecs += [np.sqrt(vec)]\n numbers.remove(max(numbers))\n #permuting base vectors\n uvecs = []\n for vec in base_vecs:\n for per_vec in permutations(vec):\n uvecs += [per_vec]\n uvecs = list(set(uvecs))\n\n #adding all possible sign options\n vecs = []\n for vec in uvecs:\n for sign in sign_possibilities(dim):\n vecs += [tuple([int(a*b) for a, b in zip(sign, vec)])]\n vecs = list(set(vecs))\n return vecs",
"def vec(self):\r\n\r\n xv = np.arange(self.dx / 2, self.lx, self.dx)\r\n yv = np.arange(-self.ly / 2 + self.dy / 2, self.ly / 2, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n if self.ox != 0:\r\n xv = np.arange(self.ox, self.lx + self.ox, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n return xv, yv, zv",
"def getVectors(self, graph):\n return [Vector.createFromTwoTuples(graph[i], graph[i + 1]) for i in range(len(graph) - 1)]",
"def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)",
"def boxVectors(self):\n return self.box_vectors",
"def getVectors(self):\n l = len(self.points)\n return [Vector.createFromTwoPoints(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]",
"def get_vectors(nodes, mode=\"xform\"):\n for each in nodes:\n position = (0, 0, 0)\n\n if mode == \"xform\":\n position = cmds.xform(\n each,\n query=True,\n translation=True,\n worldSpace=True,\n )\n\n elif mode == \"pivot\":\n position = cmds.xform(\n each,\n query=True,\n translation=True,\n rotatePivot=True,\n worldSpace=True,\n )\n\n # when using xform on component like faces or edge, the returned value\n # will be a list of each vertices position, so we need to average that\n if len(position) > 3:\n vectors = [\n MVector(position[i : i + 3])\n for i in range(0, len(position), 3)\n ]\n result = MVector()\n for vector in vectors:\n result += vector\n position = result / len(vectors)\n\n yield MVector(position)",
"def get_mvector_as_list(input_mvector):\n out_list = [input_mvector.x, input_mvector.y, input_mvector.z]\n\n return out_list",
"def get_vectors(model, corpus_size, vectors_size, vectors_type):\r\n vectors = np.zeros((corpus_size, vectors_size))\r\n for i in range(0, corpus_size):\r\n prefix = vectors_type + '_' + str(i)\r\n vectors[i] = model.docvecs[prefix]\r\n return vectors",
"def get_vectors(self, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = self.model_dbow.docvecs[prefix]\n return vectors",
"def get_vectors(model, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = model.docvecs[prefix]\n return vectors",
"def cxvec(self):\n return np.array([self.cx, self.cy])",
"def normal_vectors(ATOM_list, frame, number_of_atoms, number_of_molecules, number_of_vectors, centerlist, referencelist, boxX, boxY, boxZ):\t\n\tNORMAL_VECTORS_out=open(\"normalvectors.xyz\",'a')\n\t\n\tNORMAL_VECTORS_check=open(\"check_normalvectors.xyz\",'a')\n\t\n\tANGLEDIST_VECTORS_out=open(\"angle_dist_vectors.xyz\",'a')\n\t\n\t\n\tVorzugsrichtung_out=open(\"sum_over_all_normalvectors.dat\",'a')\n\t\n\t\n\t\n\tNORMAL_VECTORS_check.write(str( (number_of_vectors*(2 + int(len(referencelist))) )*number_of_molecules )+'\\n')\n\tNORMAL_VECTORS_check.write(str(frame)+'\\n')\n\t\n\tNORMAL_VECTORS_out.write(str( (number_of_vectors*2)*number_of_molecules )+'\\n')\n\tNORMAL_VECTORS_out.write(str(frame)+'\\n')\n\n\tANGLEDIST_VECTORS_out.write(str( (number_of_vectors*2)*number_of_molecules )+'\\n')\n\tANGLEDIST_VECTORS_out.write(str(frame)+'\\n')\n\t\n\tvorzugsvektor = np.asarray([0,0,0])\n\treference=0\n\t\n\t\n\tfor l in range(0,number_of_molecules):\n\t\tfor vec_count in range(0,number_of_vectors):\n\t\t\tr = []\n\t\t\t#get the 'middle' of the DPI molecule\n\t\t\t\n\t\t\n\t\t\t#get geometric center\n\t\t\tcenter=np.asarray([0,0,0])\n\t\t\t#append all vectors defining the center\n\t\t\tfor i in range(len(centerlist[vec_count])):\n\t\t\t\t# x y z\n\t\t\t\tr.append( np.asarray( [ ATOM_list[centerlist[vec_count][i] + reference][1],ATOM_list[centerlist[vec_count][i] + reference][2],ATOM_list[centerlist[vec_count][i] + reference][3] ] ) ) \n\t\t\t\tcenter = center + r[i]\n\t\t\tcenter = center / len(centerlist[vec_count])\n\t\t\n\t\t\t#calculate all reference vectors\n\t\t\tr=[]\n\t\t\tfor i in range(len(referencelist[vec_count])):\n\t\t\t\tr.append(np.asarray( [ ATOM_list[referencelist[vec_count][i] + reference][1],ATOM_list[referencelist[vec_count][i] + reference][2],ATOM_list[referencelist[vec_count][i] + reference][3] ] ) )\n\t\t\n\t\t\t#get the cross products\n\t\t\tnormal_vec= np.asarray([0,0,0])\n\t\t\tfor i in range(len(r)):\n\t\t\t\tif (i<(len(r)-1)):\n\t\t\t\t\t\n\t\t\t\t\t#minimum image convenction\n\t\t\t\t\tif ( abs(r[i][0]-center[0]) < abs(r[i][0]-center[0]+boxX) ) and ( abs(r[i][0]-center[0]) < abs(r[i][0]-center[0]-boxX) ):\n\t\t\t\t\t\tdx = r[i][0]-center[0]\n\t\t\t\t\telif (abs(r[i][0]-center[0]+boxX) < abs(r[i][0]-center[0]-boxX) ):\n\t\t\t\t\t\tdx = r[i][0]-center[0]+boxX\n\t\t\t\t\telse:\n\t\t\t\t\t\tdx = r[i][0]-center[0]-boxX\n\t\t\t\t\t\n\t\t\t\t\tif ( abs(r[i][1]-center[1]) < abs(r[i][1]-center[1]+boxY) ) and ( abs(r[i][1]-center[1]) < abs(r[i][1]-center[1]-boxY) ):\n\t\t\t\t\t\tdy = r[i][1]-center[1]\n\t\t\t\t\telif (abs(r[i][1]-center[1]+boxY) < abs(r[i][1]-center[1]-boxY) ):\n\t\t\t\t\t\tdy = r[i][1]-center[1]+boxY\n\t\t\t\t\telse:\n\t\t\t\t\t\tdy = r[i][1]-center[1]-boxY\n\t\t\t\t\t\t\n\t\t\t\t\tif ( abs(r[i][2]-center[2]) < abs(r[i][2]-center[2]+boxZ) ) and ( abs(r[i][2]-center[2]) < abs(r[i][2]-center[2]-boxZ) ):\n\t\t\t\t\t\tdz = r[i][2]-center[2]\n\t\t\t\t\telif (abs(r[i][2]-center[2]+boxZ) < abs(r[i][2]-center[2]-boxZ) ):\n\t\t\t\t\t\tdz = r[i][2]-center[2]+boxZ\n\t\t\t\t\telse:\n\t\t\t\t\t\tdz = r[i][2]-center[2]-boxZ\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t#minimum image convenction\n\t\t\t\t\tif ( abs(r[i+1][0]-center[0]) < abs(r[i+1][0]-center[0]+boxX) ) and ( abs(r[i+1][0]-center[0]) < abs(r[i+1][0]-center[0]-boxX) ):\n\t\t\t\t\t\tdx2 = r[i+1][0]-center[0]\n\t\t\t\t\telif (abs(r[i+1][0]-center[0]+boxX) < abs(r[i+1][0]-center[0]-boxX) ):\n\t\t\t\t\t\tdx2 = r[i+1][0]-center[0]+boxX\n\t\t\t\t\telse:\n\t\t\t\t\t\tdx2 = r[i+1][0]-center[0]-boxX\n\t\t\t\t\t\n\t\t\t\t\tif 
( abs(r[i+1][1]-center[1]) < abs(r[i+1][1]-center[1]+boxY) ) and ( abs(r[i+1][1]-center[1]) < abs(r[i+1][1]-center[1]-boxY) ):\n\t\t\t\t\t\tdy2 = r[i+1][1]-center[1]\n\t\t\t\t\telif (abs(r[i+1][1]-center[1]+boxY) < abs(r[i+1][1]-center[1]-boxY) ):\n\t\t\t\t\t\tdy2 = r[i+1][1]-center[1]+boxY\n\t\t\t\t\telse:\n\t\t\t\t\t\tdy2 = r[i+1][1]-center[1]-boxY\n\t\t\t\t\t\t\n\t\t\t\t\tif ( abs(r[i+1][2]-center[2]) < abs(r[i+1][2]-center[2]+boxZ) ) and ( abs(r[i+1][2]-center[2]) < abs(r[i+1][2]-center[2]-boxZ) ):\n\t\t\t\t\t\tdz2 = r[i+1][2]-center[2]\n\t\t\t\t\telif (abs(r[i+1][2]-center[2]+boxZ) < abs(r[i+1][2]-center[2]-boxZ) ):\n\t\t\t\t\t\tdz2 = r[i+1][2]-center[2]+boxZ\n\t\t\t\t\telse:\n\t\t\t\t\t\tdz2 = r[i+1][2]-center[2]-boxZ\n\t\t\t\t\t\n\t\t\t\t\tvec1=np.asarray([dx,dy,dz])\n\t\t\t\t\tvec2=np.asarray([dx2,dy2,dz2])\n\t\t\t\t\t#dy = r[i][1]-center[1]\n\t\t\t\t\t#dz = r[i][2]-center[2]\n\t\t\t\t\t#normal_vec = normal_vec + np.cross(r[i]-center, r[i+1]-center)\n\t\t\t\t\tnormal_vec = normal_vec + np.cross(vec1, vec2)/np.linalg.norm( np.cross(vec1, vec2) )\n\t\t\t\telif ( i==len(r) ):\n\t\t\t\t\t\n\t\t\t\t\t#minimum image convenction\n\t\t\t\t\tdx = min( abs(r[i][0]-center[0]), abs(r[i][0]-center[0]+boxX), abs(r[i][0]-center[0]-boxX) )\n\t\t\t\t\tdy = min( abs(r[i][1]-center[1]), abs(r[i][1]-center[1]+boxX), abs(r[i][1]-center[1]-boxX) )\n\t\t\t\t\tdz = min( abs(r[i][2]-center[2]), abs(r[i][2]-center[2]+boxX), abs(r[i][2]-center[2]-boxX) )\n\t\t\t\t\t\n\t\t\t\t\tdx2 = min( abs(r[0][0]-center[0]), abs(r[0][0]-center[0]+boxX), abs(r[0][0]-center[0]-boxX) )\n\t\t\t\t\tdy2 = min( abs(r[0][1]-center[1]), abs(r[0][1]-center[1]+boxX), abs(r[0][1]-center[1]-boxX) )\n\t\t\t\t\tdz2 = min( abs(r[0][2]-center[2]), abs(r[0][2]-center[2]+boxX), abs(r[0][2]-center[2]-boxX) )\n\t\t\t\t\t\n\t\t\t\t\tvec1=np.asarray([dx,dy,dz])\n\t\t\t\t\tvec2=np.asarray([dx2,dy2,dz2])\n\t\t\t\t\t#dy = r[i][1]-center[1]\n\t\t\t\t\t#dz = r[i][2]-center[2]\n\t\t\t\t\t#normal_vec = normal_vec + np.cross(r[i]-center, r[i+1]-center)\n\t\t\t\t\tnormal_vec = normal_vec + np.cross(vec1, vec2)/np.linalg.norm(np.cross(vec1, vec2))\n\t\t\t\t\t#normal_vec = normal_vec + np.cross(r[i]-center, r[0]-center) \n\t\t\n\t\t\t#normalize\n\t\t\tnormal_vec = normal_vec/np.linalg.norm(normal_vec)\n\t\t\n\t\t\t#\t#direction vectors\n\t\t\t#\tdir_vec1 = np.asarray([r[6][0],r[6][1],r[6][2]])\n\t\t\t#\tdir_vec2 = np.asarray([r[7][0],r[7][1],r[7][2]])\n\t\t\t#\tdir_vec3 = np.asarray([r[8][0],r[8][1],r[8][2]])\n\t\t\t#\tdir_vec4 = np.asarray([r[9][0],r[9][1],r[9][2]])\n\t\t\t#\tdir_vec = (dir_vec1 + dir_vec2 + dir_vec3 + dir_vec4)/4.0\n\t\t\t#\tdir_vec = mittelpunkt_dpi - dir_vec\t\t\t\n\n\t\t\t#change direction according to bending\n\t\t\t#\tif normal_vec[0]!=0 and dir_vec[0]!=0:\n\t\t\t#\t\tif normal_vec[0]/abs(normal_vec[0])!=dir_vec[0]/abs(dir_vec[0]):\n\t\t\t#\t\t\tnormal_vec[0] = -normal_vec[0]\n\t\t\t#\tif normal_vec[1]!=0 and dir_vec[1]!=0:\n\t\t#\t\t\tif normal_vec[1]/abs(normal_vec[1])!=dir_vec[1]/abs(dir_vec[1]):\n\t\t#\t\t\t\tnormal_vec[1] = -normal_vec[1]\n\t\t#\t\tif normal_vec[2]!=0 and dir_vec[2]!=0:\n\t\t#\t\t\tif normal_vec[2]/abs(normal_vec[2])!=dir_vec[2]/abs(dir_vec[2]):\n\t\t#\t\t\t\tnormal_vec[2] = -normal_vec[2]\n\n\t\t\t\n\t\t\t#check for a vorzugsvektor\n\t\t\tvorzugsvektor = vorzugsvektor + normal_vec\n\t\t\tNORMAL_VECTORS_out.write('C' +'\t'+ str((center)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tNORMAL_VECTORS_out.write('O' +'\t'+ str((center+normal_vec)).replace('[', '').replace(']','')+ 
'\\n')\n\t\t\tANGLEDIST_VECTORS_out.write('C' +'\t'+ str((center)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tANGLEDIST_VECTORS_out.write('H' +'\t'+ str((normal_vec)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tNORMAL_VECTORS_check.write('C' +'\t'+ str((center)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tNORMAL_VECTORS_check.write('O' +'\t'+ str((center+normal_vec)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tNORMAL_VECTORS_check.write('H' +'\t'+ str((normal_vec)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tfor i in range(len(referencelist[vec_count])):\n\t\t\t\tNORMAL_VECTORS_check.write('N' +'\t'+ str(ATOM_list[referencelist[vec_count][i] + reference][1])+'\t'+ str(ATOM_list[referencelist[vec_count][i] + reference][2])+'\t'+ str(ATOM_list[referencelist[vec_count][i] + reference][3])+ '\\n')\n\n\t\t#always skip 1 entire molecule forward\n\t\treference=reference + number_of_atoms/number_of_molecules\n\t\t\n\t#calculate a vorzugsvektor as sum over DIPBI normal vectors divided by number of DPBI vectors\n\tvorzugsvektor = vorzugsvektor/(number_of_vectors*frames*number_of_molecules)\n\tVorzugsrichtung_out.write(str(frame) + ' ' + str((vorzugsvektor)).replace('[', '').replace(']','')+'\\n')\n\tNORMAL_VECTORS_out.close()\n\tNORMAL_VECTORS_check.close()\n\treturn",
"def vector(self) -> np.ndarray:\n link_vectors = [link.vector for link in self.links]\n v = np.array(link_vectors).ravel()\n return v",
"def unit_vectors(self):\n # return {'comp1': CartesianRepresentation(...),\n # 'comp2': CartesianRepresentation(...),\n # 'comp3': CartesianRepresentation(...)}\n raise Exception(\"Not yet implemented\")",
"def call_single_vec(self, input_value):\n _, eigVectors = self.getEigen(input_value)\n return eigVectors[:,:,-1]",
"def vector(self):\n return self.__vector",
"def Cvec(self):\n return vec(self.xc, self.yc)",
"def get_periodic_box_vectors(self):\n return self._periodic_box_vectors",
"def get_volumes(self, dim):\n cdef np.ndarray[float64, mode='c', ndim=1] out\n\n if dim == 0:\n raise ValueError('vertices have no volume!')\n\n else:\n out = np.empty((self.mesh.topology.num[dim],),\n dtype=np.float64)\n mesh_get_volumes(self.mesh, &out[0], dim)\n\n return out",
"def get_box_vectors(file):\n box_vectors = [None,None,None]\n with open(file,\"rt\") as fin:\n for line in fin:\n if line[0:6] == \"CRYST1\":\n x_length = float(line[9:14])\n y_length = float(line[18:23])\n z_length = float(line[27:33])\n box_vectors = [x_length,y_length,z_length]\n return(box_vectors)\n return(box_vectors)",
"def vec(self):\n return np.matrix(self.val.ravel()).transpose()",
"def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model",
"def vec(self) -> Vec:\n return Vec(self.x, self.y, self.z)",
"def V_vect(self, distances):\n distances_norm2 = norm2(distances)\n distances_norm = np.sqrt(distances_norm2)\n isColliding = self.isColliding(distances_norm)\n\n # Collision term proportional to d**2 (cutoff)\n v_colliding = -distances_norm2/self.d_coll**2 + 1.5+0.5 * \\\n (self.d_attr/self.d_coll)**(2*self.n) - (self.d_attr/self.d_coll)**self.n\n v_colliding *= isColliding\n\n # Interaction potential: d - ln d\n v_interact = 0.5*self.d_attr**(2*self.n)/(np.identity(np.shape(distances_norm2)[1])[None, :, :]+distances_norm2)**self.n - self.d_attr**self.n/(\n np.identity(np.shape(distances_norm2)[1])[None, :, :]+distances_norm2)**(self.n/2) + 0.5\n v_interact *= (1 - isColliding)\n\n v = v_colliding + v_interact\n\n # A particle does not interact with itself\n for i in range(len(v)):\n np.fill_diagonal(v[i], 0)\n return v",
"def get_vector(self, name: str) -> ndarray:\n vector_path = self._get_path(name)\n vector_df = DataFrame(read_csv(vector_path, header=None))\n ixs = list(map(lambda i: i - 1, vector_df.iloc[:, 0]))\n vals = vector_df.iloc[:, 1]\n return util.create_array(ixs, vals, self.number_of_nodes)",
"def getVector(text):\n url = cfg.use_vectoriser\n res = requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']"
] | [
"0.7083323",
"0.7006027",
"0.6598478",
"0.6481406",
"0.6439225",
"0.6435624",
"0.6397429",
"0.6388611",
"0.6374452",
"0.6355553",
"0.63145626",
"0.6299127",
"0.6278399",
"0.6161161",
"0.61597615",
"0.6104503",
"0.60972047",
"0.6094927",
"0.60678005",
"0.60490125",
"0.60312015",
"0.5982828",
"0.5979147",
"0.5953703",
"0.593126",
"0.5923228",
"0.59221244",
"0.5919157",
"0.59121275",
"0.58824605"
] | 0.7255804 | 0 |
search for a molgroup by list of coordinates | def search_for_molgroup_by_coords(coords, target):
x = coords[0]
y = coords[1]
z = coords[2]
limit_list = []
for coord in x, y, z:
lower, upper = get_coord_limits(coord)
limit_list.append([lower, upper])
    search = MolGroup.objects.filter(
        target_id__title=target,
        x_com__gte=limit_list[0][0], x_com__lte=limit_list[0][1],
        y_com__gte=limit_list[1][0], y_com__lte=limit_list[1][1],
        z_com__gte=limit_list[2][0], z_com__lte=limit_list[2][1],
    )
if len(search) == 1:
mol_group = search[0]
else:
return None
return mol_group | {
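
An ORM-free sketch of the same bounding-box test, assuming get_coord_limits returns a small [lower, upper] window around each coordinate; the window widths and sample records below are made up for illustration:

def in_box(com, limits):
    # True when every component of the centre of mass sits inside its [lower, upper] window
    return all(lower <= c <= upper for c, (lower, upper) in zip(com, limits))

limits = [[9.9, 10.1], [-5.1, -4.9], [2.4, 2.6]]   # per-axis windows
groups = [
    {"id": 1, "com": (10.0, -5.0, 2.5)},           # inside the window
    {"id": 2, "com": (12.0, -5.0, 2.5)},           # outside on x
]
matches = [g for g in groups if in_box(g["com"], limits)]
print(matches)  # only group 1 remains, mirroring the len(search) == 1 case above
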
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_groups_from_ctypes(self, mesh, ctypes):\n raise NotImplementedError",
"def find_exact_match(row, groups, match_cols):\n index = tuple(row[t] for t in match_cols)\n try:\n group = groups.get_group(index)\n except KeyError:\n return []\n clus = list(set(group.hotel_cluster))\n return clus",
"def get_valid_locations(location_list, grid, shape):",
"def find_coordinates(hmms, bit_thresh):\n # get coordinates from cmsearch output\n seq2hmm = parse_hmm(hmms, bit_thresh)\n seq2hmm = best_model(seq2hmm)\n group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]\n for seq, info in list(seq2hmm.items()):\n group2hmm[seq] = {}\n # info = [model, [[hit1], [hit2], ...]]\n for group_num, group in enumerate(hit_groups(info[1])):\n # group is a group of hits to a single 16S gene\n # determine matching strand based on best hit\n best = sorted(group, reverse = True, key = itemgetter(-1))[0]\n strand = best[5]\n coordinates = [i[0] for i in group] + [i[1] for i in group]\n coordinates = [min(coordinates), max(coordinates), strand]\n # make sure all hits are to the same strand\n matches = [i for i in group if i[5] == strand]\n # gaps = [[gstart, gend], [gstart2, gend2]]\n gaps = check_gaps(matches)\n group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps]\n return group2hmm",
"def FindPointsWithinRadius(self, p_float, , vtkIdList):\n ...",
"def _get_group(self, x, y, traversed):\n loc = self[x, y]\n\n # Get surrounding locations which have the same color and whose\n # coordinates have not already been traversed\n locations = [\n (p, (a, b))\n for p, (a, b) in self._get_surrounding(x, y)\n if p is loc and (a, b) not in traversed\n ]\n\n # Add current coordinates to traversed coordinates\n traversed.add((x, y))\n\n # Find coordinates of similar neighbors\n if locations:\n return traversed.union(*[\n self._get_group(a, b, traversed)\n for _, (a, b) in locations\n ])\n else:\n return traversed",
"def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]",
"def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )",
"def select_point_from_group(group, pts):\n\n p_in_group = []\n\n for i in range(len(group)):\n p_in_group.append(pts[group[i]])\n \n return p_in_group",
"def select_point_from_group(group, pts):\n\n p_in_group = []\n\n for i in range(len(group)):\n p_in_group.append(pts[group[i]])\n \n return p_in_group",
"def find_mgrs_intersection_100km_single(footprint, gzd):\n\n polygon_geom = ogr.CreateGeometryFromWkt(footprint)\n\n file_path = unzip_mgrs_shapefile(gzd)\n\n # 2. Load the shp file and run intersection check on each feature\n shapefile_driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n grid_ds = shapefile_driver.Open(str(file_path), 0)\n\n layer = grid_ds.GetLayer()\n\n # transform coords from local UTM proj to lat long\n sourceSR = layer.GetSpatialRef()\n targetSR = osr.SpatialReference()\n targetSR.ImportFromEPSG(4326) # WGS84\n coordTrans = osr.CoordinateTransformation(sourceSR, targetSR)\n\n intersect_list = []\n\n for f in layer:\n geom = f.GetGeometryRef()\n geom.Transform(coordTrans)\n\n intersect_result = geom.Intersection(polygon_geom)\n\n if not intersect_result.IsEmpty():\n print(\"FOUND INTERSECT\")\n print(f.GetField('100kmSQ_ID'))\n intersect_list.append(f'{gzd}{f.GetField(\"100kmSQ_ID\")}')\n\n # all done!\n grid_ds = None\n\n clean_data_dir()\n\n return intersect_list",
"def cluster_mols(rd_mols, mols, target):\n id_mols = [x.pk for x in mols]\n out_data = run_lig_cluster(rd_mols, id_mols)\n for clust_type in out_data:\n for cluster in out_data[clust_type]:\n # look for molgroup with same coords - need to implement tolerance?\n mol_group = search_for_molgroup_by_coords(coords=[out_data[clust_type][cluster][\"centre_of_mass\"][0],\n out_data[clust_type][cluster][\"centre_of_mass\"][1],\n out_data[clust_type][cluster][\"centre_of_mass\"][2]],\n target=target.title)\n if not mol_group:\n mol_group = MolGroup()\n if clust_type != \"c_of_m\":\n mol_group.group_type = \"PC\"\n else:\n mol_group.group_type = \"MC\"\n mol_group.target_id = target\n mol_group.x_com = out_data[clust_type][cluster][\"centre_of_mass\"][0]\n mol_group.y_com = out_data[clust_type][cluster][\"centre_of_mass\"][1]\n mol_group.z_com = out_data[clust_type][cluster][\"centre_of_mass\"][2]\n mol_group.description = clust_type\n mol_group.save()\n for mol_id in out_data[clust_type][cluster][\"mol_ids\"]:\n if mol_id not in [a['id'] for a in mol_group.mol_id.values()]:\n this_mol = Molecule.objects.get(id=mol_id)\n mol_group.mol_id.add(this_mol)",
"def FindDistributedPoints(self, p_int, , vtkIdList, p_int_1):\n ...",
"def hassimilarcluster(ind, clusters):\n item = op.itemgetter\n global opt\n found = False\n tx = min(clusters[ind],key=item(0))[0]\n ty = min(clusters[ind],key=item(1))[1]\n for i, cl in enumerate(clusters):\n if i != ind:\n cx = min(cl,key=item(0))[0]\n cy = min(cl,key=item(1))[1]\n dx, dy = cx - tx, cy - ty\n specdist = Hausdorff_distance(clusters[ind],cl,None,(dx,dy))\n if specdist <= int(opt.rgsim):\n found = True\n break\n return found",
"def __make_group_by_atom(self, group_name, name_list):\r\n pass",
"def findSubsetIndices(grdMODEL, min_lat, max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res",
"def xy_to_group(xy, board):\n group = {xy}\n inspected = set([])\n to_inspect = group - inspected\n while to_inspect:\n for stone in to_inspect:\n inspected.add(stone)\n group |= xy_adjacents(stone, board, filter_by=\"friend\")\n to_inspect = group - inspected\n return group",
"def find_centroid_for_each(self):",
"def find_mgrs_intersection_large(footprint):\n\n polygon_geom = ogr.CreateGeometryFromWkt(footprint)\n\n mgrs_grid_file_dir = Path(DATA_DIR, 'MGRS_100kmSQ_ID')\n\n mgrs_master_shp_file = Path(mgrs_grid_file_dir, 'mgrs_gzd_final.shp')\n\n shapefile_driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n print(mgrs_master_shp_file)\n\n grid_ds = shapefile_driver.Open(str(mgrs_master_shp_file), 0)\n\n layer = grid_ds.GetLayer()\n\n # sourceSR = layer.GetSpatialRef()\n # targetSR = osr.SpatialReference()\n # targetSR.ImportFromEPSG(4326) # WGS84\n # coordTrans = osr.CoordinateTransformation(sourceSR, targetSR)\n\n feature_count = layer.GetFeatureCount()\n print(f\"Number of features in {os.path.basename(mgrs_master_shp_file)}: {feature_count}\")\n layerDefinition = layer.GetLayerDefn()\n\n for i in range(layerDefinition.GetFieldCount()):\n print(layerDefinition.GetFieldDefn(i).GetName())\n\n intersect_list = []\n\n for f in layer:\n # print feature.GetField(\"STATE_NAME\")\n # geom = feature.GetGeometryRef()\n # print(geom.Centroid().ExportToWkt())\n # geom_list.append(geom)\n # print(geom)\n\n geom = f.GetGeometryRef()\n # print(geom)\n # print(polygon_geom)\n intersect_result = geom.Intersection(polygon_geom)\n # print(intersect_result)\n if not intersect_result.IsEmpty():\n print(\"FOUND INTERSECT\")\n print(f.GetField('gzd'))\n intersect_list.append(f.GetField('gzd'))\n\n # # iterate over the geometries and dissolve all into one\n # layer = dataSource.GetLayer()\n # layerDefinition = layer.GetLayerDefn()\n\n # for i in range(layerDefinition.GetFieldCount()):\n # print(layerDefinition.GetFieldDefn(i).GetName())\n\n # Collect all Geometry\n # geomcol = ogr.Geometry(ogr.wkbGeometryCollection)\n # Create the feature and set values\n # featureDefn = outLayer.GetLayerDefn()\n # feature = ogr.Feature(featureDefn)\n\n # multipoly = ogr.Geometry(ogr.wkbMultiPolygon)\n # for feature in inLayer:\n # geomcol.AddGeometry(feature.GetGeometryRef())\n\n # geom_list = []\n\n\n # geomcol.AddGeometry(geom)\n\n # if problem_child:\n # # print(file_stem_split)\n # # print(geom)\n # print(geom.IsValid())\n # # print(idx)\n # pass\n # if file_stem_split in ['59Q']:\n # print(geom)\n # print(geom.GetGeometryName())\n\n\n\n\n # spatial_ref = osr.SpatialReference()\n # spatial_ref.ImportFromEPSG(4326)\n\n # outLayer = outDataSource.CreateLayer(\"gzd_8x6_degree\", spatial_ref, geom_type=ogr.wkbMultiPolygon)\n\n # # Add an ID field\n # idField = ogr.FieldDefn(\"id\", ogr.OFTInteger)\n # outLayer.CreateField(idField)\n # gzdField = ogr.FieldDefn(\"gzd\", ogr.OFTString)\n # outLayer.CreateField(gzdField)\n\n # for idx, file_name in enumerate(Path(target_dir).iterdir()):\n # problem_child = False\n # print(idx)\n # if file_name.suffix == '.zip':\n # # print(file_name)\n # print('Found a zip file')\n # file_name_only = file_name.name\n # file_name_stem = file_name.stem\n # # print(file_name_only)\n # # print(file_name_stem)\n\n # file_stem_split = file_name_stem.split('_')[-1]\n\n # print(file_stem_split)\n\n # # if file_stem_split not in ['59Q']:\n # # continue\n\n # if file_stem_split in ['Antarctica', 'Arctic']:\n # continue\n\n # with zipfile.ZipFile(file_name, 'r') as zf:\n # # zf.extractall('temp_unzip')\n # actual_file_stem = \"\"\n # for zip_info in zf.infolist():\n # print(zip_info.filename)\n\n # if zip_info.filename[-1] == '/':\n # continue\n\n # zip_info.filename = zip_info.filename.split('/')[-1]\n # if actual_file_stem == \"\":\n # actual_file_stem = zip_info.filename.split('.')[0]\n # zf.extract(zip_info, 
temp_zip_dir)\n\n # if actual_file_stem != file_name_stem:\n # file_name_stem = actual_file_stem\n\n # file_path = Path(temp_zip_dir, file_name_stem + '.shp')\n\n # print(file_path)\n\n # dataSource = outDriver.Open(str(file_path), 0) # 0 means read-only. 1 means writeable.\n\n # if dataSource is None:\n # print(f'Could not open {file_path}')\n # else:\n # print(f'Opened {file_path}')\n # layer = dataSource.GetLayer()\n\n # sourceSR = layer.GetSpatialRef()\n # targetSR = osr.SpatialReference()\n # targetSR.ImportFromEPSG(4326) # WGS84\n\n # coordTrans = osr.CoordinateTransformation(sourceSR, targetSR)\n\n # if file_stem_split in ['01R', '01S', '01K', '01J', '01H']:\n # # print('WE GOT A PROBLEM CHILD HERE')\n # # print('\\n\\n\\n\\n')\n # problem_child = True\n\n\n # featureCount = layer.GetFeatureCount()\n # print(f\"Number of features in {os.path.basename(file_path)}: {featureCount}\")\n\n # # # iterate over the geometries and dissolve all into one\n # # layer = dataSource.GetLayer()\n # # layerDefinition = layer.GetLayerDefn()\n\n # # for i in range(layerDefinition.GetFieldCount()):\n # # print(layerDefinition.GetFieldDefn(i).GetName())\n\n # # Collect all Geometry\n # geomcol = ogr.Geometry(ogr.wkbGeometryCollection)\n # # Create the feature and set values\n # featureDefn = outLayer.GetLayerDefn()\n # feature = ogr.Feature(featureDefn)\n\n # multipoly = ogr.Geometry(ogr.wkbMultiPolygon)\n # # for feature in inLayer:\n # # geomcol.AddGeometry(feature.GetGeometryRef())\n\n # # geom_list = []\n # for f in layer:\n # # print feature.GetField(\"STATE_NAME\")\n # # geom = feature.GetGeometryRef()\n # # print(geom.Centroid().ExportToWkt())\n # # geom_list.append(geom)\n # # print(geom)\n\n # geom = f.GetGeometryRef()\n # geom.Transform(coordTrans)\n\n # # geomcol.AddGeometry(geom)\n\n # if problem_child:\n # # print(file_stem_split)\n # # print(geom)\n # print(geom.IsValid())\n # # print(idx)\n # pass\n # if file_stem_split in ['59Q']:\n # print(geom)\n # print(geom.GetGeometryName())\n\n # if idx > 18 and idx < 1176:\n # if file_stem_split in ['59Q']:\n # print(geom)\n # print(geom.GetGeometryName())\n # if geom.GetGeometryName() == 'MULTIPOLYGON':\n # for i in range(0, geom.GetGeometryCount()):\n # g = geom.GetGeometryRef(i)\n # # print(g.GetGeometryName())\n # multipoly.AddGeometry(g)\n\n\n # else:\n # multipoly.AddGeometry(geom)\n # elif idx > 1176:\n # sub_geom = geom.GetGeometryRef(0)\n # # print(sub_geom)\n # # print(sub_geom.GetPointCount())\n\n # outRing = ogr.Geometry(ogr.wkbLinearRing)\n # # outRing.AddPoint(1154115.274565847, 686419.4442701361)\n\n # # Create inner ring\n # # innerRing = ogr.Geometry(ogr.wkbLinearRing)\n # # innerRing.AddPoint(1149490.1097279799, 691044.6091080031)\n\n # # Create polygon\n # poly = ogr.Geometry(ogr.wkbPolygon)\n # # poly.AddGeometry(outRing)\n # # poly.AddGeometry(innerRing)\n\n # for i in range(0, sub_geom.GetPointCount()):\n # # GetPoint returns a tuple not a Geometry\n # pt = sub_geom.GetPoint(i)\n\n # if pt[0] < 0:\n # point_x = pt[0] * -1\n # point_y = pt[1]\n # else:\n # point_x = pt[0]\n # point_y = pt[1]\n\n # outRing.AddPoint(point_x, point_y)\n # # print(\"%i). 
POINT (%f %f)\" %(i, point_x, point_y))\n\n # poly.AddGeometry(outRing)\n\n\n\n # multipoly.AddGeometry(poly)\n # else:\n # sub_geom = geom.GetGeometryRef(0)\n # # print(sub_geom)\n # # print(sub_geom.GetPointCount())\n\n # outRing = ogr.Geometry(ogr.wkbLinearRing)\n # # outRing.AddPoint(1154115.274565847, 686419.4442701361)\n\n # # Create inner ring\n # # innerRing = ogr.Geometry(ogr.wkbLinearRing)\n # # innerRing.AddPoint(1149490.1097279799, 691044.6091080031)\n\n # # Create polygon\n # poly = ogr.Geometry(ogr.wkbPolygon)\n # # poly.AddGeometry(outRing)\n # # poly.AddGeometry(innerRing)\n\n # for i in range(0, sub_geom.GetPointCount()):\n # # GetPoint returns a tuple not a Geometry\n # pt = sub_geom.GetPoint(i)\n\n # if pt[0] > 0:\n # point_x = pt[0] * -1\n # point_y = pt[1]\n # else:\n # point_x = pt[0]\n # point_y = pt[1]\n\n # outRing.AddPoint(point_x, point_y)\n # # print(\"%i). POINT (%f %f)\" %(i, point_x, point_y))\n\n # poly.AddGeometry(outRing)\n\n\n\n # multipoly.AddGeometry(poly)\n\n # # layer.ResetReading()\n\n # # print(feature)\n\n # cascade_union = multipoly.UnionCascaded()\n # feature.SetGeometry(cascade_union)\n\n # # convexhull = geomcol.ConvexHull()\n # # feature.SetGeometry(convexhull)\n\n # file_name_split = file_name_stem.split('_')\n # gzd_from_file_name = file_name_split[-1]\n # # print(idx + 1)\n\n # feature.SetField(\"id\", idx + 1)\n # feature.SetField(\"gzd\", gzd_from_file_name)\n\n # outLayer.CreateFeature(feature)\n\n # feature = None\n\n # # Save and close DataSource\n # dataSource = None\n\n # for file_name in Path(temp_zip_dir).iterdir():\n # os.remove(file_name)\n\n # # if idx > 20:\n # # break\n\n # # all done!\n grid_ds = None\n\n return intersect_list",
"def _bycoord(self, coord):\n query = \"\"\"SELECT * \n FROM ppmxl \n WHERE circle(coord,0.0006) @> circle(point(%f,%f),0) LIMIT 1;\"\"\" % coord\n result = self.corot.query(query)\n return result",
"def test_get_grouped_distances_within(self):\r\n groupings = get_grouped_distances(self.dist_matrix_header,\r\n self.dist_matrix, self.mapping_header, self.mapping,\r\n self.field, within=True)\r\n expected = [\r\n ('Control', 'Control', [0.625, 0.623, 0.60999999999999999,\r\n 0.57699999999999996, 0.61499999999999999,\r\n 0.64200000000000002, 0.67300000000000004,\r\n 0.68200000000000005, 0.73699999999999999,\r\n 0.70399999999999996]),\r\n ('Fast', 'Fast', [0.71799999999999997, 0.66600000000000004,\r\n 0.72699999999999998, 0.59999999999999998,\r\n 0.57799999999999996, 0.623])]\r\n self.assertEqual(groupings, expected)",
"def analyze_pins(self, pin_name):\n debug.info(2,\"Analyzing pin groups for {}.\".format(pin_name)) \n pin_set = self.pins[pin_name]\n\n # This will be a list of pin tuples that overlap\n overlap_list = []\n\n # Sort the rectangles into a list with lower/upper y coordinates\n bottom_y_coordinates = [(x.by(), x, \"bottom\") for x in pin_set]\n top_y_coordinates = [(x.uy(), x, \"top\") for x in pin_set]\n y_coordinates = bottom_y_coordinates + top_y_coordinates\n y_coordinates.sort(key=lambda x: x[0])\n\n # Map the pins to the lower indices\n bottom_index_map = {x[1]:i for i,x in enumerate(y_coordinates) if x[2]==\"bottom\"}\n top_index_map = {x[1]:i for i,x in enumerate(y_coordinates) if x[2]==\"bottom\"}\n\n # Sort the pin list by x coordinate\n pin_list = list(pin_set)\n pin_list.sort(key=lambda x: x.lx())\n\n # for shapes in x order\n for pin in pin_list:\n # start at pin's lower y coordinate\n bottom_index = bottom_index_map[pin]\n compared_pins = set()\n for i in range(bottom_index,len(y_coordinates)):\n compare_pin = y_coordinates[i][1]\n # Don't overlap yourself\n if pin==compare_pin:\n continue\n # Done when we encounter any shape above the pin\n if compare_pin.by() > pin.uy():\n break\n # Don't double compare the same pin twice\n if compare_pin in compared_pins:\n continue\n compared_pins.add(compare_pin)\n # If we overlap, add them to the list\n if pin.overlaps(compare_pin):\n overlap_list.append((pin,compare_pin))\n\n # Initial unique group assignments\n group_id = {}\n gid = 1\n for pin in pin_list:\n group_id[pin] = gid\n gid += 1\n \n for p in overlap_list:\n (p1,p2) = p\n for pin in pin_list:\n if group_id[pin] == group_id[p2]:\n group_id[pin] = group_id[p1]\n \n\n # For each pin add it to it's group\n group_map = {}\n for pin in pin_list:\n gid = group_id[pin]\n if gid not in group_map:\n group_map[gid] = pin_group(name=pin_name, pin_set=[], router=self)\n # We always add it to the first set since they are touching\n group_map[gid].pins.add(pin)\n\n self.pin_groups[pin_name] = list(group_map.values())",
"def get_city_points(city):\n for item in coordinate_list:\n if item[0] == city:\n return (item[1], item[2])",
"def group_cells_by_spatial(self, cell_ids):\n\t\tcell_ids = np.asarray(cell_ids, dtype=np.uint64)\n\t\tcell_id_xy = self.static_cell_for_cell(cell_ids)\n\n\t\tret = {}\n\t\tfor cell_id in set(cell_id_xy):\n\t\t\tret[cell_id] = cell_ids[cell_id_xy == cell_id]\n\n\t\treturn ret",
"def basicGetPointsGeodesic(self):\n\n # more geodesic, distance=2 (complicated because random)\n data = numpy.array([[0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0]])\n labels = Labels(data=data)\n result = labels.getPoints(ids=[1], mode='geodesic', distance=2, \n connectivity=1)\n result = result.tolist()\n if len(result) == 5:\n desired = [[0, 1], [0, 3], [1, 2], [2, 1], [2, 3]]\n elif len(result) == 4:\n desired = [[0, 2], [1, 1], [1, 3], [2, 2]]\n elif len(result) == 3:\n if [1, 2] in result:\n if [0, 1] in result:\n desired = [[0, 1], [1, 2], [2, 3]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 2], [2, 1]]\n elif [0, 1] in result:\n if [0, 3] in result:\n desired = [[0, 1], [0, 3], [2, 2]]\n elif [2, 1] in result:\n desired = [[0, 1], [2, 1], [1, 3]]\n else:\n desired = [[0, 1], [1, 3], [2, 2]]\n elif [2, 3] in result:\n if [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 3]]\n elif [2, 1] in result:\n desired = [[0, 2], [2, 1], [2, 3]]\n else:\n desired = [[2, 3], [1, 1], [0, 2]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 2]]\n elif [2, 1] in result:\n desired = [[2, 1], [1, 3], [0, 2]]\n for des in desired:\n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)\n\n # mode geodesic, distance=3, inset\n labels = Labels(data=data[1:3, 2:8])\n labels.setInset([slice(1, 3), slice(2, 8)])\n result = labels.getPoints(ids=[2], mode='geodesic', distance=3, \n connectivity=1)\n result = result.tolist()\n if len(result) == 1:\n np_test.assert_equal(result[0][1], 5)\n elif len(result) == 2:\n desired = []\n if [1, 4] in result:\n desired = [[1, 4], [2, 6]]\n elif [2, 4] in result:\n desired = [[2, 4], [1, 6]]\n for des in desired: \n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)",
"def test_get_groupings_within_tiny_dataset(self):\r\n self.assertEqual(_get_groupings(self.tiny_dist_matrix_header,\r\n self.tiny_dist_matrix, self.tiny_groups, within=True), [])",
"def get_locations_by_ids(self, id_list):",
"def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s",
"def _osm_component(geotype, lat, lon, search_pairs, buf=20):\n nearby = '(around:%s,%s,%s)' % (float(buf), lat, lon)\n if search_pairs == {}:\n query = geotype + nearby + ';'\n else:\n fields = ['[\"%s\"~\"%s\"]' % (k, v) for k, v in search_pairs.items()]\n field_str = ''.join(fields)\n query = geotype + field_str + nearby + ';'\n\n return query + 'out geom;'",
"def find_mgrs_intersection_100km(footprint, gzd_list):\n\n total_mgrs_100km_list = []\n\n for gzd in gzd_list:\n sub_list = find_mgrs_intersection_100km_single(footprint, gzd)\n for mgrs_id in sub_list:\n total_mgrs_100km_list.append(mgrs_id)\n\n return total_mgrs_100km_list"
] | [
"0.59095055",
"0.57509834",
"0.56964475",
"0.5656222",
"0.5626549",
"0.5608268",
"0.559361",
"0.5469414",
"0.54517496",
"0.54517496",
"0.5378664",
"0.5353408",
"0.5333075",
"0.53231955",
"0.5305053",
"0.5302384",
"0.5295839",
"0.5273223",
"0.526449",
"0.5255169",
"0.52304506",
"0.5230307",
"0.52142066",
"0.5206794",
"0.5206492",
"0.5202765",
"0.51931864",
"0.51891184",
"0.51852506",
"0.5183057"
] | 0.7494911 | 0 |
Calculate the centre of the site's molecules based on the centre of mass | def calc_site_centre(rd_mols):
coms = [centre_of_mass(mol) for mol in rd_mols]
centre = centre_of_points(coms)
logger.debug('CENTRE: %s', centre)
return centre | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_molecule_center_of_mass(self):\n center_of_mass = np.zeros([3], dtype=float)\n masses = self._prmtop[\"MASS\"]\n for atom_ind in range(len(self._crd)):\n center_of_mass += masses[atom_ind] * self._crd[atom_ind]\n total_mass = masses.sum()\n if total_mass == 0:\n raise RuntimeError(\"zero total mass\")\n return center_of_mass / total_mass",
"def center_of_mass(molecule):\n xcom=ycom=zcom=0\n totm = 0\n for atom in get_atoms(molecule):\n m = get_mass(atom)\n x,y,z = get_xyz(atom)\n xcom += m*x\n ycom += m*y\n zcom += m*z\n totm += m\n xcom /= totm\n ycom /= totm\n zcom /= totm\n return xcom,ycom,zcom",
"def centre_of_mass(mol):\n\n numatoms = mol.GetNumAtoms()\n conf = mol.GetConformer()\n if not conf.Is3D():\n return 0\n # get coordinate of each atoms\n pts = np.array([list(conf.GetAtomPosition(atmidx)) for atmidx in range(numatoms)])\n atoms = [atom for atom in mol.GetAtoms()]\n mass = Descriptors.MolWt(mol)\n # get center of mass\n center_of_mass = np.array(np.sum(atoms[i].GetMass() * pts[i] for i in range(numatoms))) / mass\n return center_of_mass",
"def get_center_of_mass_allies(self,obs):",
"def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)",
"def barycentre (liste_objets):\r\n x = 0\r\n y = 0\r\n summass = 0\r\n for i in liste_objets:\r\n x += i.mass * i.posx\r\n y += i.mass * i.posy\r\n summass += i.mass\r\n x /= summass\r\n y /= summass\r\n return x,y,summass",
"def center_of_mass(self, tolerance=1e-9):\n props = GProp_GProps()\n brepgprop_VolumeProperties(self.topods_solid(), props, tolerance)\n com = props.CentreOfMass()\n return geom_utils.gp_to_numpy(com)",
"def calculate_center_of_mass(symbols, coordinates):\n\n total_mass = calculate_molecular_mass(symbols)\n\n mass_array = np.zeros([len(symbols),1])\n\n for i in range(len(symbols)):\n mass_array[i] = atomic_weights[symbols[i]]\n\n center_of_mass = sum(coordinates * mass_array) / total_mass\n\n return center_of_mass",
"def center_of_mass(self, entity, geometric=False):\n\n # Structure, Model, Chain, Residue\n if isinstance(entity, Entity.Entity):\n atom_list = entity.get_atoms()\n # List of Atoms\n elif hasattr(entity, \"__iter__\") and [x for x in entity if x.level == \"A\"]:\n atom_list = entity\n # Some other weirdo object\n else:\n raise ValueError(\n f\"Center of Mass can only be calculated from the following objects:\\n\"\n f\"Structure, Model, Chain, Residue, list of Atoms.\"\n )\n\n masses = []\n positions = [[], [], []] # [ [X1, X2, ..] , [Y1, Y2, ...] , [Z1, Z2, ...] ]\n\n for atom in atom_list:\n masses.append(atom.mass)\n\n for i, coord in enumerate(atom.coord.tolist()):\n positions[i].append(coord)\n\n # If there is a single atom with undefined mass complain loudly.\n if \"ukn\" in set(masses) and not geometric:\n raise ValueError(\n f\"Some atoms don't have an element assigned.\\n\"\n f\"Try adding them manually or calculate the geometrical center of mass instead.\"\n )\n\n if geometric:\n return [sum(coord_list) / len(masses) for coord_list in positions]\n else:\n w_pos = [[], [], []]\n for atom_index, atom_mass in enumerate(masses):\n w_pos[0].append(positions[0][atom_index] * atom_mass)\n w_pos[1].append(positions[1][atom_index] * atom_mass)\n w_pos[2].append(positions[2][atom_index] * atom_mass)\n\n return [sum(coord_list) / sum(masses) for coord_list in w_pos]",
"def center_of_mass(elements, coordinates):\n mass = molecular_weight(elements)\n mass_array = np.array([[atomic_mass[i.upper()]] * 3 for i in elements])\n mass_coordinates = coordinates * mass_array\n return (np.sum(mass_coordinates, axis=0) / np.array([mass, mass, mass]))",
"def center_of_mass(self, matrix):\n # Changing the positions of all objects relative to center of mass, in origo.\n x, y, z = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 1:4], axis=0)/(np.sum(matrix[:, 0], axis=0))\n print('Center of mass located at (%.4g, %.4g, %.4g)' %(x, y, z))\n # x-direction\n matrix[:, 1] = matrix[:, 1]-x\n # y-direction\n matrix[:, 2] = matrix[:, 2]-y\n # z-direction\n matrix[:, 3] = matrix[:, 3]-z\n # The Suns initial velocity which makes the total momentum of the system zero\n # velcity_sun = sum(mass_planet_i*veocity_planet_i)/(mass_sun)\n u, v, w = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 4:7], axis=0)/(matrix[0, 0])\n print('The initial velocity of the Sun (%.4g, %.4g, %.4g)' %(u, v, w))\n matrix[0, 4:7] = u, v, w\n # Returning the modified matrix\n return matrix",
"def getcenter(self):\n return self.centro.cartesianas()",
"def centerOfMass(data):\r\n dd = []\r\n for d in data:\r\n dd.append(d.coordinate)\r\n\r\n data = dd\r\n data = np.array(data)\r\n n = len(data)\r\n x = sum(data[:,0])\r\n y = sum(data[:,1])\r\n z = sum(data[:,2])\r\n x/=n\r\n y/=n\r\n z/=n\r\n return x,y,z,n",
"def find_center_of_mass(selection='(all)', state=-1):\n state = utils.int_to_state(state)\n model = cmd.get_model(selection, state=state)\n com = cpv.get_null()\n # iterate all atoms and add vectors of center of mass of each atoms\n for atom in model.atom:\n com = cpv.add(com, atom.coord)\n com = cpv.scale(com, 1.0 / len(model.atom))\n return com",
"def center_of_mass(xy, masses):\n return np.sum(masses.reshape(len(xy), 1) * xy.astype(np.float), axis=0) / float(np.sum(masses))",
"def center(self, center_mass=False):\n if center_mass:\n com = self.center_of_mass\n self.xyz -= com\n else:\n self.xyz -= self.xyz.mean(0)",
"def centre_of_mass(image, black_blob=False):\r\n image = image.copy()\r\n shape = image.shape\r\n if black_blob:\r\n image = 255-image\r\n centre = np.array([0, 0]).astype(float)\r\n\r\n #------------------------------START YOUR CODE-----------------------------#\r\n s = np.sum(image)\r\n indices = np.mgrid[0:image.shape[0],0:image.shape[1]]\r\n ys = np.sum(indices[0]*image)\r\n xs = np.sum(indices[1]*image)\r\n\r\n # Equivalent, but slower\r\n #xs = 0.0\r\n #ys = 0.0\r\n #s = 0.0 \r\n #for y in range(shape[0]):\r\n # for x in range(shape[1]):\r\n # p = image[y, x]\r\n # xs += x*p\r\n # ys += y*p\r\n # s += p\r\n\r\n centre = np.array([ ys/s, xs/s ])\r\n #-------------------------------END YOUR CODE------------------------------#\r\n return centre.astype(int)",
"def _center(self, forces):\n\t\t\n\t\tzipped = zip(self.grid.corners(), forces)\n\t\treturn self._weightedAverage(zipped)",
"def get_center_coordinates(self):\n totalX = 0\n totalY = 0\n totalZ = 0\n for atom in self.get_atoms():\n totalX += atom.get_x()\n totalY += atom.get_y()\n totalZ += atom.get_z()\n \n xCenter = totalX / len(self.get_atoms())\n yCenter = totalY / len(self.get_atoms())\n zCenter = totalZ / len(self.get_atoms())\n \n return xCenter, yCenter, zCenter",
"def _center_position(ephemerides: List[Ephemeris]) -> Tuple[Quantity, Quantity]:\n # find the RA, dec center\n center_ra, center_dec = EphemerisService.center_position(ephemerides)\n\n return center_ra, center_dec",
"def test_get_center_of_mass(self):\n symbols = ['C', 'H', 'H', 'H', 'H']\n coords = np.array([[0.0000000, 0.0000000, 0.0000000],\n [0.6269510, 0.6269510, 0.6269510],\n [-0.6269510, -0.6269510, 0.6269510],\n [-0.6269510, 0.6269510, -0.6269510],\n [0.6269510, -0.6269510, -0.6269510]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n for cm_coord in center_of_mass:\n self.assertEqual(cm_coord, 0.0)\n\n symbols = ['O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H']\n coords = np.array([[1.28706525, 0.52121353, 0.04219198],\n [0.39745682, -0.35265044, -0.63649234],\n [0.36441173, -1.68197093, 0.08682400],\n [-0.59818222, 0.10068325, -0.65235399],\n [0.74799641, -0.48357798, -1.66461710],\n [0.03647269, -1.54932006, 1.12314420],\n [-0.31340646, -2.38081353, -0.41122551],\n [1.36475837, -2.12581592, 0.12433596],\n [2.16336803, 0.09985803, 0.03295192]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n self.assertAlmostEqual(center_of_mass[0], 0.7201, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.4880, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.1603, 3)\n\n numbers = [6, 6, 8, 1, 1, 1, 1, 1, 1]\n coords = np.array([[1.1714680, -0.4048940, 0.0000000],\n [0.0000000, 0.5602500, 0.0000000],\n [-1.1945070, -0.2236470, 0.0000000],\n [-1.9428910, 0.3834580, 0.0000000],\n [2.1179810, 0.1394450, 0.0000000],\n [1.1311780, -1.0413680, 0.8846660],\n [1.1311780, -1.0413680, -0.8846660],\n [0.0448990, 1.2084390, 0.8852880],\n [0.0448990, 1.2084390, -0.8852880]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, numbers=numbers)\n self.assertAlmostEqual(center_of_mass[0], -0.0540, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.0184, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.0000, 3)",
"def get_centre(self):\n # just get the centroid\n # perhaps try something like:\n # https://github.com/mapbox/polylabel/blob/master/polylabel.js\n # in the future\n coords = np.array([(n.x, n.y) for n in self.nodes])\n centre_x = coords[:, 0].mean()\n centre_y = coords[:, 1].mean()\n return centre_x, centre_y",
"def get_center_of_mass_enemies(self,obs):",
"def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center",
"def get_centre(self):\n return self.c",
"def CenterOfMassForShape(shape):\n polygons = SplitIntoPolygons(shape)\n total_A = 0\n total_cx = 0\n total_cy = 0\n\n for polygon in polygons:\n cx, cy, A = CenterOfMass(polygon)\n total_cx += A * cx\n total_cy += A * cy\n total_A += A\n\n return (total_cx / total_A, total_cy / total_A)",
"def center(self):\n\n ca_atoms = self.ca_atoms\n ca_atom_vectors = ca_atoms[\"ca.atom\"].to_list()\n ca_atom_vectors = [i for i in ca_atom_vectors if i is not None]\n centroid = self.center_of_mass(ca_atom_vectors, geometric=False)\n centroid = Vector(centroid)\n\n return centroid",
"def get_center_of_masses(self) -> np.array:\n com = np.average(self.obj[:, :2], weights=self.obj[:, 2], axis=0)\n return com",
"def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center",
"def calculate_center_of_mass(chainVecs: IMP.algebra.Vector3Ds):\n return IMP.algebra.get_centroid(chainVecs)"
] | [
"0.769952",
"0.7631538",
"0.73653996",
"0.7218101",
"0.7181016",
"0.71449685",
"0.7141054",
"0.7112312",
"0.71018285",
"0.6969243",
"0.6898578",
"0.6887456",
"0.6861607",
"0.67939705",
"0.6758411",
"0.67555857",
"0.66412055",
"0.6636503",
"0.6582574",
"0.6568094",
"0.656536",
"0.6516124",
"0.6464905",
"0.6463946",
"0.6451357",
"0.6448616",
"0.6428138",
"0.64185226",
"0.6405626",
"0.6395332"
] | 0.77714396 | 0 |
search for a molgroup by description | def search_for_molgroup_by_description(description, target):
search = MolGroup.objects.filter(target_id__title=target, description=description)
logger.debug("len(search)=%d", len(search))
if len(search) == 1:
mol_group = search[0]
elif len(search) > 1:
# Note that this will also set the mol_group in the MoleculeTag
    # objects for this mol_group to null.
for molgroup in search:
molgroup.delete()
return None
else:
return None
return mol_group | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_020_query_groups(self):\n\n testflow.step(\"Querying for groups\")\n assert self.query_cli.run(\n what='group'\n )[0], \"Failed to search for groups\"",
"def group_describe(self, group):\n mapped = self.map_vects(datanorm)\n mappednp= np.array(mapped)\n \n groups= mappednp[:,0]\n data['Group'] = pd.Series(groups, index=data.index)\n print(data[data['Group']==group].describe())",
"def corpus_group():",
"def product_group_search(obj, query):\n client = get_client(obj)\n\n pgs = client.product_group_list(q=query)\n\n print(json.dumps(pgs, indent=4))",
"def test_get_group(self):\n pass",
"def make_group_by_keyword(self, keyword):\r\n pass",
"def test_get_tag_group_by(self):\n url = \"?\"\n query_params = self.mocked_query_params(url, OCPTagView)\n handler = OCPTagQueryHandler(query_params)\n tag_keys = handler.get_tag_keys(filters=False)\n\n group_by_key = tag_keys[0]\n group_by_value = \"group_by\"\n url = f\"?group_by[tag:{group_by_key}]={group_by_value}\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n group_by = handler._get_tag_group_by()\n group = group_by[0]\n expected = \"pod_labels__\" + group_by_key\n self.assertEqual(len(group_by), 1)\n self.assertEqual(group[0], expected)",
"def get_nested_groups_names(group):\n return (\n criterion.findtext(\"value\")\n for criterion in group.findall(\"criteria/criterion\") if\n criterion.findtext(\"name\") in (\"Computer Group\", \"Mobile Device Group\")\n and criterion.findtext(\"search_type\") == \"member of\")",
"def test_get_resource_group_by_moid(self):\n pass",
"def getGroup(self, resname, atomname):\n group = \"\"\n if resname in self.map:\n resid = self.map[resname]\n if resid.hasAtom(atomname):\n atom = resid.atoms[atomname]\n group = atom.group\n return group",
"def test_function(self):\n self.ms_client.http_request(method='GET', url_suffix='groups', params={'$orderby': 'displayName'})\n demisto.results('ok')",
"def moc_group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"moc_group\")",
"def moc_group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"moc_group\")",
"def fetch_group(query: str) -> Group:\n group_filter = {\"pk\": query} if query.isdigit() else {\"name\": query}\n\n try:\n return Group.objects.get(**group_filter)\n except Group.DoesNotExist:\n raise exceptions.ParseError(\"Unknown group: {}\".format(query))",
"def _search_in_description(self, regexp):\n if self.description:\n match = re.search(regexp, self.description)\n if match:\n return match.group(CUSTOM_ATTRIBUTE)\n return None",
"def search(self, name: str) -> \"Navaids\":\n return self.__class__(\n self.data.query(\n \"description == @name.upper() or name == @name.upper()\"\n )\n )",
"def test_search_with_space(self):\n # Create a group to test searching for groups\n Group.objects.create(name='spam', auto_complete=True)\n Group.objects.create(name='jam', auto_complete=True)\n Group.objects.create(name='bread', auto_complete=True)\n\n url = reverse('search')\n response = self.mozillian_client.get(url, {'q': 'am'})\n\n eq_(response.status_code, 200)\n\n queryset = response.context['people'].object_list\n for up in [self.mozillian.userprofile, self.mozillian2.userprofile]:\n self.assertTrue(up in queryset)\n\n # Assert appropriate group names are found in the document\n self.assertContains(response, 'spam')\n self.assertContains(response, 'jam')\n self.assertNotContains(response, 'bread')",
"def findGroup(self, name):\n for g in self._groups:\n if g.find('name').text.strip() == name:\n return CAGroup(g)\n \n # If we are here the group does not exist\n\n g = ET.SubElement(self._root, 'group')\n n = ET.SubElement(g, 'name')\n n.text = name.strip()\n return CAGroup(g)",
"def test_load_group(query_factory):\n text = \"a [{large|size} {latte|product} with {nonfat milk|option}|product] please\"\n\n processed_query = markup.load_query(text, query_factory)\n entities = processed_query.entities\n\n assert len(entities) == 3\n\n assert entities[0].text == \"large\"\n assert entities[0].entity.type == \"size\"\n assert entities[0].span == Span(2, 6)\n assert entities[0].parent == entities[1]\n\n assert entities[1].text == \"latte\"\n assert entities[1].entity.type == \"product\"\n assert entities[1].span == Span(8, 12)\n assert entities[1].children == (entities[0], entities[2])\n\n assert entities[2].text == \"nonfat milk\"\n assert entities[2].entity.type == \"option\"\n assert entities[2].span == Span(19, 29)\n assert entities[2].parent == entities[1]",
"def get_group_from_alias(self, alias):\r\n group_from_alias = self.fs.query([\r\n Filter('type', '=', 'intrusion-set'),\r\n FilterCasefold('aliases', 'casefold', alias)\r\n ])\r\n\r\n if not group_from_alias:\r\n return \"\"\r\n\r\n return group_from_alias[0][MITRE_GROUP_NAME]",
"def search_for_molgroup_by_coords(coords, target):\n\n x = coords[0]\n y = coords[1]\n z = coords[2]\n\n limit_list = []\n\n for coord in x, y, z:\n lower, upper = get_coord_limits(coord)\n limit_list.append([lower, upper])\n\n search = MolGroup.objects.filter(target_id__title=target, x_com__gte=limit_list[0][0], x_com__lte=limit_list[0][1],\n y_com__gte=limit_list[1][0], y_com__lte=limit_list[1][1],\n z_com__gte=limit_list[2][0],\n z_com__lte=limit_list[2][1])\n\n if len(search) == 1:\n mol_group = search[0]\n else:\n return None\n\n return mol_group",
"def findMnemonicGroups(table):\n groupid = 0\n #container elements\n elms = doc.getElementsByTagName(\"container\") + doc.getElementsByTagName(\"containers\")\n for e in elms:\n items = getItemsInContainer(e)\n getMnemonicsAndWriteData(table, items, groupid)\n groupid = groupid + 1\n \n #dialog elements\n elms = doc.getElementsByTagName(\"dialog\")\n for e in elms:\n items = getItemsInDialog(e)\n getMnemonicsAndWriteData(table, items, groupid)\n groupid = groupid + 1",
"def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)",
"def test_get_groups(self):\n pass",
"def test_get_groups(self):\n pass",
"def search(self, sid, group):\n zx = \"\".join([chr(random.randint(97,122)) for i in xrange(0, 11)])\n resdat = self.req(sid, ['{\"a\":\"kA-_jfrF\",\"r\":\"0\",\"t\":2007,\"p\":{\"1000\":[0,0],\"2\":\"kA-_jfrF0\"}}',\n '{\"a\":\"kA-_jfrF\",\"r\":\"1\",\"t\":2602,\"p\":{\"1000\":[0,0],\"2\":\"kA-_jfrF2\",\"3\":\"\",\"4\":{\"2\":25,\"1\":0},\"6\":\"[email protected]\"}}']).read()\n print \"RESULT DATA\",resdat",
"def test_find_description_multi_one(self):\n result = Project.objects.find(['xxx', 'ThisFails'], project_type=None)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.category_top)",
"def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)",
"def __make_group_by_atom(self, group_name, name_list):\r\n pass",
"def matches(inline,groupby,groupvals):\n for i,m in enumerate(groupby):\n if inline[m] == groupvals[i]:\n continue\n else:\n return False\n return True"
] | [
"0.5843368",
"0.56135756",
"0.5463794",
"0.54140854",
"0.53698933",
"0.5358017",
"0.5354256",
"0.5297383",
"0.5297368",
"0.51956767",
"0.5153298",
"0.51520944",
"0.51520944",
"0.5145656",
"0.5145108",
"0.51268834",
"0.51226366",
"0.5122467",
"0.5106349",
"0.5106155",
"0.5088297",
"0.5080813",
"0.503473",
"0.5033141",
"0.5033141",
"0.50238085",
"0.50236493",
"0.50178987",
"0.5007362",
"0.49991804"
] | 0.74397874 | 0 |
Update/Create mol_groups and molecule_tags with site information | def specifc_site(rd_mols, mols, target, site_description=None):
# look for molgroup with same target and description
mol_group = search_for_molgroup_by_description(target=target.title,
description=site_description)
if not mol_group:
mol_group = MolGroup()
mol_group.group_type = "MC"
mol_group.target_id = target
centre = calc_site_centre(rd_mols)
mol_group.x_com = centre[0]
mol_group.y_com = centre[1]
mol_group.z_com = centre[2]
mol_group.description = site_description
mol_group.save()
# A molecule tag record may exist already, but won't the first time the
# target is loaded.
try:
mol_tag = MoleculeTag.objects.get(tag=site_description,
target_id=target.id)
except MoleculeTag.DoesNotExist:
mol_tag = None
if not mol_tag:
# New site/tag or the tag has been deleted
mol_tag = MoleculeTag()
mol_tag.tag = site_description
mol_tag.category = TagCategory.objects.get(category='Sites')
mol_tag.target = target
mol_tag.mol_group = mol_group
mol_tag.save()
else:
# Tag already exists
# Apart from the new mol_group and molecules, we shouldn't be
# changing anything.
mol_tag.mol_group = mol_group
mol_tag.save()
ids = [m.id for m in mols]
print([a['id'] for a in mol_group.mol_id.values()])
for mol_id in ids:
if mol_id not in [a['id'] for a in mol_group.mol_id.values()]:
logger.debug("mol_group mol_id=%s", mol_id)
this_mol = Molecule.objects.get(id=mol_id)
mol_group.mol_id.add(this_mol)
if mol_id not in [a['id'] for a in mol_tag.molecules.values()]:
logger.debug("mol_tag mol_id=%s", mol_id)
this_mol = Molecule.objects.get(id=mol_id)
mol_tag.molecules.add(this_mol) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_site_in_gme(self, projectNode, countryNodes, site, i):\n core = self.core\n siteNode = core.create_child(projectNode, self.META[\"Site\"])\n position_item = core.get_registry(projectNode, \"position\")\n position_item[\"y\"] = position_item[\"y\"] + 50 * i\n core.set_registry(siteNode, \"position\", position_item)\n core.set_attribute(siteNode, \"name\", str(site[\"Site ID 1\"]))\n core.set_attribute(siteNode, \"siteId2\", str(site[\"Site ID 2\"]))\n core.set_attribute(siteNode, \"city\", site[\"City\"])\n core.set_attribute(siteNode, \"zipCode\", str(site[\"ZIP code\"]))\n core.set_attribute(siteNode, \"address\", site[\"Address\"])\n core.set_attribute(siteNode, \"siteSetup\", site[\"Site setup\"])\n core.set_attribute(siteNode, \"deviceQuantity\", int(site[\"Device quantity\"]))\n core.set_attribute(siteNode, \"installReq\", site[\"Installation required?\"])\n core.set_attribute(siteNode, \"maintReq\", site[\"Maintenance required?\"])\n core.set_attribute(siteNode, \"orderType\", site[\"Order type\"])\n if site[\"Overlay required?\"] == \"yes\":\n core.set_attribute(siteNode, \"overlayReq\", True)\n if site[\"Underlay required?\"] == \"yes\":\n core.set_attribute(siteNode, \"underlayReq\", True)\n if site[\"Order type\"] == \"Bandwidth based\":\n core.set_attribute(siteNode, \"bandwidth\", site[\"Bandwidth category\"])\n if \"Feature set\" in site.keys():\n core.set_attribute(siteNode, \"featureSet\", site[\"Feature set\"])\n core.set_attribute(siteNode, \"maintenanceType\", site[\"On-site maintenance\"])\n\n bundleNode = connect_vendor_bundle_to_site(self, siteNode, projectNode, position_item, site, site[\"Series\"])\n countryNode = connect_country_to_site(self, siteNode, projectNode, position_item, site, countryNodes)\n nodePair = NodePair(bundleNode,countryNode,siteNode)\n return nodePair",
"def process_info(info, site):\n # Urubu doesn't split the 'tags' into multiple strings\n if \"tags\" in info:\n if isinstance(info[\"tags\"], str):\n info[\"tags\"] = info[\"tags\"].split(\", \")\n # Identify to which folder the item belongs (paper, blog, etc)\n if \"type\" not in info:\n info[\"type\"] = \"/{}\".format(info[\"id\"].split(\"/\")[1])\n # Add the current date to the site metadata\n if \"now\" not in site:\n site[\"now\"] = datetime.utcnow()\n # Add the last git commit hash to the site metadata\n if \"commit\" not in site:\n completed = subprocess.run(\n [\"git\", \"rev-parse\", \"--short\", \"HEAD\"], capture_output=True, text=True\n )\n site[\"commit\"] = completed.stdout.strip()",
"def update_metadata(self):\n parser = GenericParser(\n fn_re='{}/(e\\d+s\\d+)_.*/Production.nc'.format(self.data_folder),\n group_names=['sim'],\n group_transforms=[lambda x: x],\n top_fn='',\n step_ps=self.timestep\n )\n meta = gather_metadata('{}/e*/*nc'.format(self.data_folder), parser)\n meta['top_fn'] = sorted(glob('{}/e*/structure.prmtop'.format(self.input_folder)))\n self.meta = meta",
"def populate_common(node, modl_id, model_instance):\n\tfound_common = node.find('Common')\n\tif found_common is not None:\n\t\tcommon = Common()\n\t\tcommon.populate(found_common, modl_id)\n\t\tfound_summary = found_common.find(\"Summary\")\n\n\t\tif found_summary is not None and found_summary is not \"\" :\n\t\t\tif found_summary.text is not None and found_summary.text is not \"\" :\n\t\t\t\tmodel_instance.common_summary = found_summary.text",
"def setup_group_workspaces(context):\n if context.readDataFile(\"marker.txt\") is None:\n return\n\n portal = context.getSite()\n if \"groups\" not in portal.objectIds():\n\n groups = portal[\n portal.invokeFactory(\"Folder\",id=\"groups\")]\n\n # set default properties\n groups.setTitle(\"groups\")\n groups.setDescription(\"Group workspaces container.\")\n groups._getWorkflowTool().doActionFor(groups, \"publish\" \"\")\n groups.setExcludeFromNav(True)\n groups.update() \n logger.info(\"Groups container created.\")",
"def update(self):\n brains = self.query\n items_with_bodytext = ['Document', 'News Item']\n folderish_items = ['Folder', 'nva.flexfolder.flexfolder']\n counter = 1\n objectlist = []\n for i in brains:\n entry = {}\n if i.portal_type in items_with_bodytext:\n obj = i.getObject()\n entry['title'] = obj.Title()\n entry['desc'] = obj.Description()\n entry['text'] = obj.getText()\n entry['marker'] = 'collapse-%s' % counter\n if i.portal_type in folderish_items:\n info = self.createHtmlSnippet(i.getObject())\n if not info:\n info = u'<p>Für weitere Informationen klicken Sie bitte <a class=\"internal-link\" href=\"%s\">hier.</a></p>' %i.getURL() \n entry['title'] = i.Title\n entry['desc'] = i.Description\n entry['text'] = info\n entry['marker'] = 'collapse-%s' % counter\n else:\n info = u'<p>Für weitere Informationen klicken Sie bitte <a class=\"internal-link\" href=\"%s\">hier.</a></p>' %i.getURL() \n entry['title'] = i.Title\n entry['desc'] = i.Description\n entry['text'] = info\n entry['marker'] = 'collapse-%s' % counter\n objectlist.append(entry)\n counter += 1\n self.objectlist = objectlist",
"def set_content(self):\n should_process = False # whether wiki page need to be updated\n soup = BeautifulSoup(self.body, \"html.parser\")\n\n try:\n # Test Cluster Wiki Page\n tag = soup.find(text=self.server_name).find_next('span').find(text=re.compile('4.[0-9].[0-9].[0-9](.[0-9]b[0-9]{1,4})?|UNKNOWN')).find_parent()\n print(str(tag))\n except Exception as e:\n print('No server name Found')\n print('Now Looking for IP address instead\\n')\n try:\n tag = soup.find(text=self.ip_addr).find_next('span').find(text=re.compile('4.[0-9].[0-9].[0-9](.[0-9]b[0-9]{1,4})?')).find_parent()\n print(str(tag))\n except:\n print('No IP address Found')\n print('Now Looking for alias name instead\\n')\n try:\n tag = soup.find_all(text=re.compile(self.alias_name+'.*'))[-1].find_next('span').find(text=re.compile('4.[0-9].[0-9].[0-9](.[0-9]b[0-9]{1,4})?|UNKNOWN')).find_parent()\n print(str(tag))\n except:\n print('No alias name Found')\n return should_process\n\n # compare unravel version in wiki page with local\n if tag.string and tag.string != self.unravel_version:\n tag.string = self.unravel_version\n should_process = True\n elif not tag.string and self.unravel_version:\n tag.string = self.unravel_version\n should_process = True\n\n # Create new wiki page content for update\n self.new_content = str(soup)\n return should_process",
"def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()",
"def import_sites(input_csv=\"../2012_ROOMS_site_info_sample.csv\"):\n reader = csv.DictReader(open(input_csv))\n for s in reader:\n number = s[\"Site ID\"]\n site = models.NewSite.all().filter('number =', number).get()\n if site:\n logging.info('site %s exists, skipping', number)\n continue\n else:\n site = models.NewSite(number=number)\n site.program = PROGRAM\n site.budget = int(s[\"Budgeted Cost in Campaign\"]) if s[\"Budgeted Cost in Campaign\"] else 0\n\n # Because Python 2.x csv module only reads ascii.\n def clean_s(k):\n return s[k].replace('\\n', ' ').replace('\\xe2', \"'\").replace('\\x80', \"'\").replace('\\x99', '').replace('\\xc3', '').replace('\\x95', '').replace('\\xb1', '').encode('ascii', 'replace')\n\n site.name = clean_s(\"Repair Application: Applicant's Name\")\n site.street_number = clean_s(\"Street Address\")\n site.city_state_zip = \"%s CA, %s\" % (\n clean_s(\"Repair Application: Recipient's City\"), \n clean_s(\"Repair Application: Recipient's Zip Code\"))\n site.applicant = clean_s(\"Repair Application: Applicant's Name\")\n site.applicant_home_phone = clean_s(\"Repair Application: Applicant Home Phone\")\n site.applicant_work_phone = clean_s(\"Repair Application: Applicant Work Phone\")\n site.applicant_mobile_phone = clean_s(\"Repair Application: Applicant Mobile Phone\")\n site.sponsor = clean_s(\"(Sponsor) Campaign Description\")\n site.rrp_test = clean_s(\"Repair Application: RRP Test Results\")\n site.rrp_level = clean_s(\"Repair Application: RRP Result Notes\")\n # site.roof = clean_s(\"Roof?\")\n site.jurisdiction = clean_s(\"Jurisdiction\")\n site.announcement_subject = clean_s(\"Announcement Subject\")\n site.announcement_body = clean_s(\"Announcement Body\")\n site.put()\n logging.info('put site %s', number)",
"def create_site_structure(root, structure):\n for item in structure:\n id = item['id']\n title = item['title']\n description = item.get('description', u'')\n if id not in root:\n if 'creators' not in item:\n item['creators'] = CREATORS\n obj = api.content.create(root, **item)\n # publish private content or make a workflow transition\n if item['type'] not in ['Image', 'File']:\n if '_transition' not in item and api.content.get_state(obj) == 'private':\n api.content.transition(obj, 'publish')\n elif item.get('_transition', None):\n api.content.transition(obj, item['_transition'])\n # constrain types in folder?\n if '_addable_types' in item:\n constrain_types(obj, item['_addable_types'])\n # the content has more content inside? create it\n if '_children' in item:\n create_site_structure(obj, item['_children'])\n # add an image to all news items\n if obj.portal_type == 'News Item':\n if 'image' in item:\n obj.setImage(item['image'])\n # set the default view to object\n if '_layout' in item:\n obj.setLayout(item['_layout'])\n # XXX: workaround for https://github.com/plone/plone.api/issues/99\n obj.setTitle(title)\n obj.setDescription(description)\n obj.reindexObject()\n logger.debug(u' {0} criado e publicado'.format(title))\n else:\n logger.debug(u' pulando {0}; conteúdo existente'.format(title))",
"def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license",
"def update_meta_data(s, m_list, m_codes):\n # TODO(elia): This is not data about the data (=metadata) but data about component models\n\n s['clf_labels'] = collect_and_verify_clf_classlabels(m_list, m_codes)\n s['FI'] = collect_feature_importances(m_list, m_codes)\n\n return s",
"def setup_openpmd_species_component( self, grp ) :\n self.setup_openpmd_component( grp )",
"def update_site():\n site_path = os.path.join(PROJECTS_ROOT, CURRENT_SITE)\n docs_path = os.path.join(site_path, 'doc_src')\n with cd(site_path):\n run('git pull --all')\n run('workon djangopatterns && pip install -r %s/setup/requirements.txt' % site_path)\n run('workon djangopatterns && %s/manage.py syncdb' % site_path)\n # run('workon djangopatterns && %s/manage.py migrate' % site_path)\n run('workon djangopatterns && %s/manage.py collectstatic --noinput' % site_path)\n run('workon djangopatterns && %s/manage.py compress' % site_path)\n with cd(docs_path):\n run('git pull --all')\n # run('workon djangopatterns && cd doc_src && make clean')\n # run('workon djangopatterns && cd doc_src && make json')\n reload_site()",
"def __build_one_site(root_dir, all_docs, domain_refs, doc_entry_ref, www_root_dir, site_name, github_url, user_group):\n www_build_dir = www_root_dir + site_name + \"/\"\n\n print(\"Reset build directory : \" + www_build_dir)\n shutil.rmtree(www_build_dir, ignore_errors=True)\n try:\n shutil.copytree(\"src/default_www/\", www_build_dir)\n except: # strange behaviour on Windows, try again ...\n shutil.copytree(\"src/default_www/\", www_build_dir)\n \n all_domain_cats = { XML.xpath_plain(all_docs, \".//*[@ref='\"+itm_ref+\"']/@cat\") for itm_ref in domain_refs } - {\"\"}\n \n domain_cats = []\n for cat_ref in all_domain_cats:\n cat_restrict = XML.xpath_plain(all_docs, \".//*[@ref='\"+cat_ref+\"']/@restricted_to\")\n if (cat_restrict == \"\") or (user_group in cat_restrict):\n domain_cats += [cat_ref]\n with_errors = user_group in \"#devs,#admin\" \n path = HTM.store_home_page(www_build_dir, all_docs, \"\", root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)\n\n for cat_ref in domain_cats:\n try:\n path = HTM.store_index_page(www_build_dir, all_docs, cat_ref, root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)\n except:\n LIB.debug_error()\n LIB.debug(\"### Error for category index : \", cat_ref)\n\n for itm_ref in domain_refs:\n cat_ref = XML.xpath_plain(all_docs, \".//*[@ref='\"+itm_ref+\"']/@cat\")\n if (cat_ref in domain_cats):\n try:\n path = HTM.store_content_page(www_build_dir+cat_ref+\"/\", all_docs, itm_ref, root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)\n except:\n LIB.debug_error()\n LIB.debug(\"### Error for file : \", itm)\n\n path = HTM.store_glossary_page(www_build_dir, all_docs, \"\", root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)\n\n if with_errors:\n path = HTM.store_error_page(www_build_dir, all_docs, \"\", root_dir, github_url, doc_entry_ref, domain_cats, domain_refs, with_errors)",
"def site2nrml(model, params_dict): \n \"\"\"\n # Some XML definitions\n NAMESPACE = 'http://openquake.org/xmlns/nrml/0.4'\n GML_NAMESPACE = 'http://www.opengis.net/gml'\n SERIALIZE_NS_MAP = {None: NAMESPACE, 'gml': GML_NAMESPACE} \n gml_ns = SERIALIZE_NS_MAP['gml']\n \"\"\"\n \n # Head matter \n root = etree.Element(_tag='nrml', nsmap={'gml': 'http://www.opengis.net/gml'})\n root.set('xmlns', 'http://openquake.org/xmlns/nrml/0.4')\n root.append(etree.Comment('%s' % '%s site model' %(model)))\n \n\n # Define Site Model Name \n sMod = etree.SubElement(root, \"siteModel\")\n sMod.set('name', model + ' Site Model')\n \n # Define sub element\n \n for key in params_dict:\n \n site = etree.SubElement(sMod, \"site\")\n site.set('lon', '%s' % key[0])\n site.set('lat', '%s' % key[1])\n site.set('vs30', '%s' % params_dict[key][0])\n site.set('vs30Type', '%s' % 'inferred')\n site.set('z1pt0', '%s' % '%3.3f' % float(params_dict[key][1]))\n site.set('z2pt5', '%s' % '%3.3f' % float(params_dict[key][2]))\n \n #print(getMinMax(params_dict))\n \n # Form tree and write to xml\n root_tree = etree.ElementTree(root)\n outFile = open((out_directory + '/' + out_filename), 'wb')\n root_tree.write(outFile, encoding=\"utf-8\", xml_declaration=True, pretty_print=True)",
"def cr_update_shelter_population(site_id):\n\n db = current.db\n s3db = current.s3db\n settings = current.deployment_settings\n\n dtable = s3db.cr_shelter_details\n\n # Get the details record\n record = db(dtable.site_id == site_id).select(dtable.id,\n dtable.capacity_day,\n dtable.capacity_night,\n limitby = (0, 1),\n ).first()\n\n if not record:\n # Create one\n dtable.insert(site_id = site_id)\n record = db(dtable.site_id == site_id).select(dtable.id,\n dtable.capacity_day,\n dtable.capacity_night,\n limitby = (0, 1),\n ).first()\n\n # Get population numbers\n rtable = s3db.cr_shelter_registration\n query = (rtable.site_id == site_id) & \\\n (rtable.deleted != True)\n if settings.get_cr_check_out_is_final():\n query &= (rtable.registration_status != 3)\n\n cnt = rtable._id.count()\n rows = db(query).select(rtable.day_or_night,\n cnt,\n groupby = rtable.day_or_night,\n orderby = rtable.day_or_night,\n )\n\n population_day = population_night = 0\n for row in rows:\n reg_type = row[rtable.day_or_night]\n number = row[cnt]\n if reg_type == NIGHT and number:\n population_night = number\n elif reg_type == DAY_AND_NIGHT and number:\n population_day = number\n # population_day is both day /and/ night\n population_night += population_day\n\n # Get allocation numbers\n # @ToDo: deployment_setting to disable Allocations\n atable = s3db.cr_shelter_allocation\n query = (atable.site_id == site_id) & \\\n (atable.status.belongs((1, 2, 3, 4))) & \\\n (atable.deleted != True)\n dcnt = atable.group_size_day.sum()\n ncnt = atable.group_size_night.sum()\n row = db(query).select(dcnt,\n ncnt,\n limitby = (0, 1),\n orderby = dcnt,\n ).first()\n if row:\n if row[dcnt] is not None:\n allocated_capacity_day = row[dcnt]\n else:\n allocated_capacity_day = 0\n if row[ncnt] is not None:\n allocated_capacity_night = row[ncnt]\n else:\n allocated_capacity_night = 0\n else:\n allocated_capacity_day = allocated_capacity_night = 0\n\n # Compute available capacity\n capacity_day = record.capacity_day\n if capacity_day:\n available_capacity_day = capacity_day - \\\n population_day - \\\n allocated_capacity_day\n else:\n available_capacity_day = 0\n capacity_night = record.capacity_night\n if capacity_night:\n available_capacity_night = record.capacity_night - \\\n population_night - \\\n allocated_capacity_night\n else:\n available_capacity_night = 0\n\n if settings.get_cr_shelter_housing_unit_management():\n cr_update_housing_unit_population(site_id)\n\n # Update record\n record.update_record(population_day = population_day,\n population_night = population_night,\n available_capacity_day = available_capacity_day,\n available_capacity_night = available_capacity_night,\n )",
"def updateSites(self):\n updated_entry = self.client.Update(self.entry)\n return updated_entry",
"def seed():\n for fullname in os.listdir(\"static/examples\"):\n filename, extension = os.path.splitext(fullname)\n relpath = \"static/examples/\" + fullname\n if extension == '.json':\n with open(relpath) as f:\n settings = json.load(f)\n title = settings.get('title')\n short_url = filename\n username = None\n \n meta = db.session.query(models.Metadata).filter_by(short_url=short_url).first()\n\n if meta == None:\n new_graph = models.Graph(settings)\n db.session.add(new_graph)\n db.session.commit()\n \n new_meta = models.Metadata(title, new_graph.id, username, short_url=short_url)\n db.session.add(new_meta)\n db.session.commit()\n \n else:\n graph = db.session.query(models.Graph).filter_by(id=meta.graph_id).first()\n graph.settings = settings\n meta.title=title\n db.session.commit()",
"def load_species_groups():\n\n print(\"Species groups\")\n\n SpeciesGroup.query.delete()\n\n with open(\"seed_data/species_group_seed.psv\") as species:\n for row in species:\n species_group_id, species_group_name = row.strip().split(\"|\")\n\n group = SpeciesGroup(species_group_id = species_group_id,\n species_group = species_group_name)\n\n db.session.add(group)\n\n db.session.commit()",
"def update_wpsnl(self, nml):\n nml_share = nml['share']\n self._update_entry(nml_share, 'subgrid_ratio_x', self.subgrid_ratio[0])\n self._update_entry(nml_share, 'subgrid_ratio_y', self.subgrid_ratio[1])\n\n # prevent geogrid from re-processing the grid (HACK: note that all grids must be activated\n # before metgrid runs!)\n self._update_entry(nml_share, 'active_grid', not self.precomputed)\n\n nml_geogrid = nml['geogrid']\n self._update_entry(nml_geogrid, 'geog_data_res', self.geog_res)\n self._update_entry(nml_geogrid, 'parent_id', self.parent_id)\n self._update_entry(nml_geogrid, 'parent_grid_ratio', self.parent_cell_size_ratio)\n self._update_entry(nml_geogrid, 'i_parent_start', self.parent_start[0])\n self._update_entry(nml_geogrid, 'j_parent_start', self.parent_start[1])\n self._update_entry(nml_geogrid, 's_we', 1)\n self._update_entry(nml_geogrid, 's_sn', 1)\n self._update_entry(nml_geogrid, 'e_we', self.domain_size[0])\n self._update_entry(nml_geogrid, 'e_sn', self.domain_size[1])\n\n # only for top-level domains\n if self.dom_id == self.parent_id:\n self._update_entry(nml_geogrid, 'dx', self.cell_size[0])\n self._update_entry(nml_geogrid, 'dy', self.cell_size[1])\n self._update_entry(nml_geogrid, 'map_proj', 'lambert')\n self._update_entry(nml_geogrid, 'ref_lat', self.ref_latlon[0])\n self._update_entry(nml_geogrid, 'ref_lon', self.ref_latlon[1])\n self._update_entry(nml_geogrid, 'truelat1', self.truelats[0])\n self._update_entry(nml_geogrid, 'truelat2', self.truelats[1])\n self._update_entry(nml_geogrid, 'stand_lon', self.stand_lon)",
"def insert_mbifields(config,doctype,inst):\n\t\n\tdocname = get_docname_from_schema(doctype,config)\n\tfieldtext = '<table width=\"100%\" bgcolor=\"#99CC00\" align=\"center\" cellspacing=\"2\" cellpadding=\"2\" border=\"1\"><tr><td align=\"left\"><br /><b>Modify a docname bibliographic information:</b><br /><br /><span style=\"color: red;\">*</span>Reference Number: '\n\tfieldtext = fieldtext.replace(\"docname\",docname)\n\tfieldlevel = \"O\"\n\taction = \"MBI\"\n\tpagenum = \"1\"\n\tfieldname = \"rn\"\n\tfieldshortdesc = fieldname.replace(\"hgf_\",\"\")\n\tfieldcheck = \"\"\n\tinsert_field_onto_submissionpage(doctype, action, pagenum, fieldname, fieldtext, fieldlevel, fieldshortdesc, fieldcheck) #insert into sbmFIELD\t\n\t\n\t## hgf_change\n\tselect_box = '<select visibility=\"hidden\" name=\"mod_%s[]\" size=\"1\"><option value=\"Select:\">Please click continue:</option></select>' %doctype #fake selectbox\n\telname = \"mod_\" + doctype\n\telmarccode = \"\"\n\teltype = \"S\"\n\telsize = \"\"\n\telrows = \"\"\n\telcols = \"\"\n\telmaxlength = \"\"\n\telval = \"\"\n\telfidesc = select_box # select box for modification form\n\telmodifytext = \"\"\n\tinsert_element_details(elname, elmarccode, eltype, elsize, elrows, elcols, elmaxlength, elval, elfidesc, elmodifytext) # inserrt into sbmFIELDDESCR\n\t\n\t\n\tfieldtext = ''\n\tfieldlevel = \"O\"\n\taction = \"MBI\"\n\tpagenum = \"1\"\n\tfieldname = elname\n\tfieldshortdesc = elname.replace(\"hgf_\",\"\")\n\tfieldcheck = \"\"\n\tinsert_field_onto_submissionpage(doctype, action, pagenum, fieldname, fieldtext, fieldlevel, fieldshortdesc, fieldcheck) #insert into sbmFIELD\t\n\t\n\t#mbi_end \n\tfieldtext = '<br /><br /></td></tr></table><br />'\n\tfieldlevel = \"O\"\n\taction = \"MBI\"\n\tpagenum = \"1\"\n\tfieldname = \"mbi_end\"\n\tfieldshortdesc = fieldname.replace(\"hgf_\",\"\")\n\tfieldcheck = \"\"\n\tinsert_field_onto_submissionpage(doctype, action, pagenum, fieldname, fieldtext, fieldlevel, fieldshortdesc, fieldcheck) #insert into sbmFIELD\t",
"def _update_subgrid(grid_path, grid_res, grid_id_name='GRIDMET_ID', \n grid_meta_path=None):\n\n if not os.path.isfile(grid_path):\n raise FileNotFoundError('The file path for the grid fishnet '\\\n +'was invalid or does not exist. ')\n\n # for building from user's grid (not gridMET)\n if grid_meta_path is not None:\n if not Path(grid_meta_path).is_file():\n raise FileNotFoundError('ERROR: Grid metadata file not found')\n # otherwise assume gridMET data\n else:\n # look for pacakged gridmet_cell_data.csv if path not given\n grid_meta_path = get_gridmet_meta_csv(\n gridmet_meta_path=grid_meta_path)\n\n tmp_out = grid_path.replace('.shp', '_tmp.shp')\n\n # load gridMET metadata file for looking up gridMET IDs\n grid_meta_df = pd.read_csv(grid_meta_path)\n # WGS 84 projection\n crs = from_epsg(4326) \n\n # overwrite fishnet grid with updated GRIDMET_ID field\n with fiona.open(grid_path, 'r') as source:\n print(\n 'Adding grid IDs ({}) to fishnet grid, saving to: \\n'.format(\n grid_id_name),\n os.path.abspath(grid_path), '\\n'\n )\n \n n_cells = len([f for f in source])\n print(\n 'Looking up and assigning values for ', n_cells, \n ' gridcells.\\n'\n ) \n \n # Copy the source schema and add GRIDMET_ID property.\n sink_schema = source.schema\n sink_schema['properties'][grid_id_name] = 'int'\n # overwrite file add spatial reference\n with fiona.open(\n tmp_out, \n 'w', \n crs=crs, \n driver=source.driver, \n schema=sink_schema\n ) as sink:\n # add GRIDMET_ID feature to outfile\n for feature in source:\n coords = feature['geometry']['coordinates'][0]\n grid_id = get_cell_ID(\n coords, grid_meta_df, grid_id_name, grid_res\n )\n feature['properties'][grid_id_name] = grid_id\n sink.write(feature)\n # cannot open same file and write to it on Windows, overwrite temp\n root_dir = os.path.split(grid_path)[0]\n for f in os.listdir(root_dir):\n if '_tmp' in f:\n move(OPJ(root_dir, f), OPJ(root_dir, f.replace('_tmp', '')))\n print(\n 'Completed assigning grid IDs to fishnet. \\n'\n )",
"def update_sites(self, sites):\n\n self.labels = {}\n\n with open(sites) as f:\n for line in f:\n (website, label) = line.split()\n self.labels[website] = label\n\n self.sites = list(self.labels.keys())",
"def update_h5store(fileh):\n logger.debug(\"Running update_h5store\")\n root = fileh.root\n\n version = root._v_attrs['version'] if 'version' in root._v_attrs else 0\n \n if version < 1:\n # No version, or new file\n # Ensure that the major nodes exist\n logger.debug('updating to version 1')\n for node in ['spaces', 'datasets', 'tasksets', 'results']:\n if not hasattr(root, node):\n fileh.createGroup( root, node )\n # Check that the dataset nodes are well-formed\n for dsnode in root.datasets:\n if not hasattr(dsnode, 'tokenstreams'):\n logger.debug('Node %s did not have tokenstreams node; adding.', dsnode._v_name)\n fileh.createGroup( dsnode, \"tokenstreams\" )\n if not hasattr(dsnode, 'sequence'):\n logger.debug('Node %s did not have sequence node; adding.', dsnode._v_name)\n fileh.createGroup( dsnode, \"sequence\" )\n if version < 2:\n # In version 2, we introduce the concept of instance spaces, detaching the instance\n # identifiers from the dataset nodes and instead attaching them to the space nodes\n logger.debug('updating to version 2')\n for dsnode in root.datasets:\n # Move the instance id node to spaces\n id_node = dsnode.instance_id\n id_node._v_attrs['size'] = len(dsnode.instance_id)\n id_node._v_attrs['type'] = 'instance'\n id_node._v_attrs['name'] = dsnode._v_name\n id_node._v_attrs['encoding'] = 'utf8' # to be safe, in case we had e.g. utf8 filenames\n fileh.moveNode(dsnode.instance_id, root.spaces, dsnode._v_name)\n # Unless otherwise specified, the instance space is the dataset name\n dsnode._v_attrs['instance_space'] = dsnode._v_name\n\n # Add the instance space metadata to all tasksets\n for tsnode in root.tasksets:\n tsnode._v_attrs.instance_space = tsnode._v_attrs.dataset\n for t in tsnode:\n t._v_attrs.instance_space = t._v_attrs.dataset\n \n # Add the instance space metadata to all results\n for rnode in root.results:\n rnode._v_attrs.instance_space = rnode._v_attrs.dataset\n if hasattr(rnode._v_attrs, 'eval_dataset'):\n rnode._v_attrs.eval_space = rnode._v_attrs.eval_dataset\n for node in rnode:\n if node._v_name == 'summary':\n for summary in node:\n summary._v_attrs.instance_space = summary._v_attrs.dataset\n if hasattr(summary._v_attrs, 'eval_dataset'):\n summary._v_attrs.eval_space = summary._v_attrs.eval_dataset\n else:\n node._v_attrs.instance_space = node._v_attrs.dataset\n if hasattr(node._v_attrs, 'eval_dataset'):\n node._v_attrs.eval_space = node._v_attrs.eval_dataset\n if version < 3:\n # In version 3, we add weights associated with task nodes\n for tsnode in root.tasksets:\n for t in tsnode:\n fileh.createGroup(t, 'weights')\n if version < 4:\n # In version 4, we introduced a node to store splits in datasets\n for dsnode in root.datasets:\n if not hasattr(dsnode, 'split'):\n logger.debug('Node %s did not have split node; adding.', dsnode._v_name)\n fileh.createGroup( dsnode, \"split\" )\n # TODO:\n # Replace all boolean maps for tasks with their equivalent flatnonzero indices\n # Eliminate UUID from taskset and result metadata\n # Get rid of all date attrs\n # Ensure all TSR nodes have a summary node\n \n\n logger.debug(\"updated store from version %d to %d\", version, STORE_VERSION)\n root._v_attrs['version'] = STORE_VERSION\n fileh.flush()",
"def updateMetaAtom (self):\r\n # print (\"Old state DNS: \\n\")\r\n # self.stateDanglingNodes()\r\n synchList = []\r\n synchListState = []\r\n for i in range(len(self.mol)):\r\n for j in range(len(self.mol[i].nodeArray)):\r\n synchList.append(self.mol[i].nodeArray[j])\r\n synchListState.append(synchList[i].state)\r\n #print (\"The original state is: \\n\" + str(synchListState) + \"\\n\")\r\n # Find new state for every node\r\n newStates = []\r\n for i in range(len(synchList)):\r\n oldState = synchList[i].state\r\n synchList[i].calculateState()\r\n newStates.append(synchList[i].state)\r\n synchList[i].state = oldState\r\n \r\n for i in range(len(synchList)):\r\n synchList[i].state = newStates[i]\r\n synchListState[i] = synchList[i].state\r\n \r\n offSet = 0 \r\n for i in range(len(self.mol)):\r\n for j in range(len(self.mol[i].nodeArray)):\r\n self.mol[i].nodeArray[j].state = synchListState[offSet]\r\n offSet += 1\r\n stateMol = []\r\n \r\n for i in range(len(self.mol)):\r\n for j in range(len(self.mol[i].nodeArray)):\r\n stateMol.append(self.mol[i].nodeArray[j].state)\r\n \r\n # print (\"The new state is: \\n\" + str(synchListState) + \"\\n\")\r\n # print (\"The state of the mol array is: \" + str(stateMol) + \"\\n\")\r\n #print (\"Post update \\n\")\r\n self.stateDanglingNodes()\r\n offSet = 0 \r\n oldStateNodes = [] # Store the old state of nodes in molecule\r\n newStateNodes = [] # Stores the new state\r\n # The code below goes through each metaspike and ensures that the dangling nodes have been updated with the correct\r\n # new state\r\n for i in range(len(self.metaSpikes)):\r\n if self.metaSpikes[i].typeSpike == 1:\r\n #print (\"Inside type 1 \\n\")\r\n #print (\"The number of DNs is: \" + str(len(self.metaSpikes[i].danglingNodeList)) + \"\\n\")\r\n for j in range(len(self.metaSpikes[i].danglingNodeList)):\r\n # Find the location of the dangling node in the synch list and change the dangling nodes state to match\r\n # state locted in the synch list\r\n if self.metaSpikes[i].danglingNodeList[j].node in synchList:\r\n oldStateNodes.append(self.metaSpikes[i].danglingNodeList[j].node.state)\r\n indexNode = synchList.index(self.metaSpikes[i].danglingNodeList[j].node)\r\n # print (\"The current value is: \" + str(self.metaSpikes[i].danglingNodeList[j].node.state) + \"\\n\")\r\n # print (\"The index of node is: \" + str(indexNode) + \"\\n\")\r\n # print (\"The new value should be: \" + str(synchListState[indexNode]) + \"\\n\")\r\n self.metaSpikes[i].danglingNodeList[j].changeState(synchListState[indexNode]) \r\n newStateNodes.append(self.metaSpikes[i].danglingNodeList[j].node.state)\r\n # print (\"Node in list \\n\")\r\n else:\r\n # print (\"The number of DTs is: \" + str(len(self.metaSpikes[i].danglingTailList)) + \"\\n\")\r\n #print (\"Inside type 2 \\n\")\r\n # With dangling tails we need an extra for loop to iterate across each nodelist of the tail\r\n for j in range(len(self.metaSpikes[i].danglingTailList)):\r\n for k in range(len(self.metaSpikes[i].danglingTailList[j].nodeList)):\r\n if self.metaSpikes[i].danglingTailList[j].nodeList[k].node in synchList:\r\n oldStateNodes.append(self.metaSpikes[i].danglingTailList[j].nodeList[k].state)\r\n indexNode = synchList.index(self.metaSpikes[i].danglingTailList[j].nodeList[k].node)\r\n self.metaSpikes[i].danglingTailList[j].nodeList[k].changeState(synchListState[indexNode]) \r\n newStateNodes.append(self.metaSpikes[i].danglingTailList[j].nodeList[k].state)\r\n #print (\"Node in list \\n\")\r\n \r\n # print (\"After running update code 
\\n\")\r\n self.stateDanglingNodes()\r\n # Recalculate the state of the metaatom\r\n self.calculateState()\r\n #print (\"The old state is:\\n\" + str(oldStateNodes) + \"\\n\")\r\n #print (\"The new state is:\\n\" + str(newStateNodes) + \"\\n\")\r\n \r\n \r\n # Next need to give each node in mol its state \r\n \r\n #print (\"Intensity before update: \" + str(self.metaSpikes[i].intensity) + \"\\n\")\r\n #print (\"Intensity after update: \" + str(self.metaSpikes[i].intensity) + \"\\n\")\r\n # Now need to recalculate state\r",
"def get_items(self):\n\n self.logger.info(\"Site-Descriptors Builder Started\")\n\n self.logger.info(\"Setting indexes\")\n\n # All relevant materials that have been updated since site-descriptors\n # were last calculated\n\n q = dict(self.mat_query)\n all_task_ids = list(self.materials.distinct(self.materials.key, q))\n q.update(self.materials.lu_filter(self.site_descriptors))\n new_task_ids = list(self.materials.distinct(self.materials.key, q))\n self.logger.info(\n \"Found {} entirely new materials for site-descriptors data\".format(\n len(new_task_ids)))\n for task_id in all_task_ids:\n if task_id in new_task_ids:\n any_piece = True\n\n else: # Any piece of info missing?\n data_present = self.site_descriptors.query(\n properties=[self.site_descriptors.key, \"site_descriptors\", \"statistics\"],\n criteria={self.site_descriptors.key: task_id}).limit(1)[0]\n any_piece = False\n for k, v in self.all_output_pieces.items():\n if k not in list(data_present.keys()):\n any_piece = True\n break\n else:\n any_piece = False\n for e in v:\n if e not in data_present[k]:\n any_piece = True\n break\n if not any_piece:\n for l in self.sds['csf'].feature_labels():\n for fpi in data_present['site_descriptors']['csf']:\n if l not in fpi.keys():\n any_piece = True\n break\n if any_piece:\n yield self.materials.query(\n properties=[self.materials.key, \"structure\"],\n criteria={self.materials.key: task_id}).limit(1)[0]",
"def analyse_mols(mols, target, specified_site=False, site_description=None):\n rd_mols = [Chem.MolFromMolBlock(x.sdf_info) for x in mols]\n if not specified_site:\n cluster_mols(rd_mols, mols, target)\n else:\n specifc_site(rd_mols, mols, target, site_description)\n\n get_vectors(mols)",
"def update_nodes(nodes, sc, organization, org_id, site_names):\n for node in nodes:\n print(\"=\" * 75)\n print(\"Node:\", node[\"id\"], node[\"serial\"], node[\"model\"])\n print(\"org:\", node[\"org\"], organization)\n print(\"site:\", node[\"site\"])\n print(\"location:\", node[\"location\"])\n\n site_id = node[\"site\"]\n site_name = site_names[site_id]\n print(\"\\nSetting location to '{}'\".format(site_name))\n node[\"location\"] = site_name\n result = sc.put(\"node/\" + node[\"id\"], data=node)\n print(\"updated location:\", result[\"location\"])\n print(\"Response:\", sc.response.status_code, sc.response.reason, \"\\n\")\n print()",
"def customer_group_put(group_info):\n related_groups = customer_group_get_related(group_info[\"group_id\"])\n\n now = datetime.datetime.now()\n f = '%Y-%m-%d %H:%M:%S'\n insert_time = now.strftime(f)\n\n result = {\"success\" : 1, \"message\" : \"Customer Company can not be Updated\"}\n\n for groups in related_groups:\n c_group_info = list(groups)\n #check for the roles\n c_g_id = c_group_info[0]\n c_g_role = c_group_info[1].split(\"(\")[1][:-1]\n c_g_name = c_group_info[1].split(\"(\")[0]\n new_c_g_name = group_info[\"group_name\"] + \"(\"+ c_g_role +\")\"\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n cursor = db.cursor()\n query = \"\"\"\n UPDATE `groups`\n SET\n `group_name` = \"%s\",\n `company_name` = \"%s\",\n `company_address` = \"%s\",\n `company_telephone` = \"%s\",\n `company_fax` = \"%s\",\n `company_website` = \"%s\",\n `company_sales_contact` = \"%s\",\n `company_purchase_contact` = \"%s\",\n `company_business` = \"%s\",\n `company_business_type` = \"%s\",\n `company_sales_email` = \"%s\",\n `company_purchase_email` = \"%s\",\n `company_reg_number` = \"%s\",\n `company_vat_number` = \"%s\",\n `description` = \"%s\"\n WHERE\n `group_id` = \"%s\"\n \"\"\" %(\n new_c_g_name, \n group_info[\"company_name\"],\n group_info[\"company_address\"],\n group_info[\"company_telephone\"],\n group_info[\"company_fax\"],\n group_info[\"company_website\"],\n group_info[\"company_sales_contact\"],\n group_info[\"company_purchase_contact\"],\n group_info[\"company_business\"],\n group_info[\"company_business_type\"],\n group_info[\"company_sales_email\"],\n group_info[\"company_purchase_email\"],\n group_info[\"company_reg_number\"],\n group_info[\"company_vat_number\"],\n group_info[\"description\"],\n c_g_id\n )\n try:\n if cursor.execute(query):\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Company Updated Successfully\"}\n except Exception as e:\n result = {\"success\" : 1, \"message\" : \"Customer Company can not be Updated. Error \\\"\\'%s\\'\\\" \\\n Query = %s\" % (e, query) }\n finally:\n cursor.close()\n db.close()\n return result"
] | [
"0.55544835",
"0.506571",
"0.50160784",
"0.49909645",
"0.49212745",
"0.48970407",
"0.48649988",
"0.48467103",
"0.48318732",
"0.4804414",
"0.48017174",
"0.4796993",
"0.4781652",
"0.47556576",
"0.4721774",
"0.4715594",
"0.4668902",
"0.46504635",
"0.46460664",
"0.46218956",
"0.46214607",
"0.46067825",
"0.459977",
"0.45951283",
"0.45950514",
"0.4592162",
"0.45895",
"0.45833176",
"0.45788765",
"0.457319"
] | 0.7153863 | 0 |
Check if molecules belong to a cluster or a specific site for a given target | def analyse_mols(mols, target, specified_site=False, site_description=None):
rd_mols = [Chem.MolFromMolBlock(x.sdf_info) for x in mols]
if not specified_site:
cluster_mols(rd_mols, mols, target)
else:
specifc_site(rd_mols, mols, target, site_description)
get_vectors(mols) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has(self, target):\n return target in self.by_target",
"def has(self, target):\r\n return target in self.by_target",
"def slaves_found(self):\n return not (len(self.topology) and self.topology[0][1] == [])",
"def specifc_site(rd_mols, mols, target, site_description=None):\n\n # look for molgroup with same target and description\n mol_group = search_for_molgroup_by_description(target=target.title,\n description=site_description)\n\n if not mol_group:\n mol_group = MolGroup()\n\n mol_group.group_type = \"MC\"\n mol_group.target_id = target\n centre = calc_site_centre(rd_mols)\n mol_group.x_com = centre[0]\n mol_group.y_com = centre[1]\n mol_group.z_com = centre[2]\n mol_group.description = site_description\n mol_group.save()\n\n # A molecule tag record may exist already, but won't the first time the\n # target is loaded.\n\n try:\n mol_tag = MoleculeTag.objects.get(tag=site_description,\n target_id=target.id)\n except MoleculeTag.DoesNotExist:\n mol_tag = None\n\n if not mol_tag:\n # New site/tag or the tag has been deleted\n mol_tag = MoleculeTag()\n mol_tag.tag = site_description\n mol_tag.category = TagCategory.objects.get(category='Sites')\n mol_tag.target = target\n mol_tag.mol_group = mol_group\n mol_tag.save()\n else:\n # Tag already exists\n # Apart from the new mol_group and molecules, we shouldn't be\n # changing anything.\n mol_tag.mol_group = mol_group\n mol_tag.save()\n\n ids = [m.id for m in mols]\n print([a['id'] for a in mol_group.mol_id.values()])\n\n for mol_id in ids:\n if mol_id not in [a['id'] for a in mol_group.mol_id.values()]:\n logger.debug(\"mol_group mol_id=%s\", mol_id)\n this_mol = Molecule.objects.get(id=mol_id)\n mol_group.mol_id.add(this_mol)\n\n if mol_id not in [a['id'] for a in mol_tag.molecules.values()]:\n logger.debug(\"mol_tag mol_id=%s\", mol_id)\n this_mol = Molecule.objects.get(id=mol_id)\n mol_tag.molecules.add(this_mol)",
"def search_cluster_by_node(self, target):\n for i in range(len(self.result)):\n cluster = self.result[i]\n for node in cluster.get_nodes():\n if target == node:\n return i\n return None",
"def is_gentarget(self, target):\r\n raise NotImplementedError",
"def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)",
"def check_components(self, data, _cluster, _linked_clusters):\n\n do_not_merge = []\n clustercoords = data[0:2,_cluster.cluster_members]\n _linked_clusters = [_link.antecessor for _link in _linked_clusters]\n\n if _cluster.number_of_members > 50:\n # This is faster for large numbers of cluster_members but slower when\n # number_of_members is small. A value of 50 is arbitrary but selected\n # empirically.\n for _link in _linked_clusters:\n linkcoords = data[0:2,_link.cluster_members]\n concatcoords = np.concatenate([linkcoords.T, clustercoords.T])\n concatcoords = concatcoords.T\n vals, idx, count = np.unique(concatcoords, return_index=True, return_counts=True, axis = 1)\n idx_vals_repeated = np.where(count > 1)[0]\n if np.size(idx_vals_repeated) > 0:\n do_not_merge.append(True)\n else:\n do_not_merge.append(False)\n\n else:\n for _link in _linked_clusters:\n boolval = []\n for j in range(_cluster.number_of_members):\n # Check all cluster components against those belonging to another cluster\n multiple_components = (data[0,_cluster.cluster_members[j]] == data[0,_link.cluster_members]) & \\\n (data[1,_cluster.cluster_members[j]] == data[1,_link.cluster_members])\n if np.any(multiple_components):\n boolval.append(True)\n else:\n boolval.append(False)\n if np.any(boolval):\n do_not_merge.append(True)\n else:\n do_not_merge.append(False)\n boolval = None\n\n return do_not_merge",
"def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True",
"def cluster(self):\n assert False",
"def IsTarget(self, target_name):\n return target_name in self.GetTargets()",
"def contains(collection, target):\n\treturn target in collection",
"def can_prove(self, target):\n return self.prop == target.prop and set(self.hyps).issubset(set(target.hyps))",
"def isInCluster(self):\n logger.debug(\"Checking if %s is a part of cluster\" % self)\n role = self.getClusterRole()\n return role is not None and role != \"DISABLED\"",
"def on_cluster(cmds=[\"sbatch\"]):\n\n def cmd_exists(cmd):\n result = subprocess.call(\n \"type \" + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n return result == 0\n\n for cmd in cmds:\n if cmd_exists(\"sbatch\"):\n return True\n return False",
"def hassimilarcluster(ind, clusters):\n item = op.itemgetter\n global opt\n found = False\n tx = min(clusters[ind],key=item(0))[0]\n ty = min(clusters[ind],key=item(1))[1]\n for i, cl in enumerate(clusters):\n if i != ind:\n cx = min(cl,key=item(0))[0]\n cy = min(cl,key=item(1))[1]\n dx, dy = cx - tx, cy - ty\n specdist = Hausdorff_distance(clusters[ind],cl,None,(dx,dy))\n if specdist <= int(opt.rgsim):\n found = True\n break\n return found",
"def is_vm_in_cluster(cls, cluster_obj, vm_obj):\n match = False\n for clus_vm in cluster_obj.resourcePool.vm:\n if vm_obj == clus_vm:\n match = True\n\n return match",
"def is_cluster_leader(target, schema=None):\n try:\n return cluster_status(target, schema=schema).get('leader') == 'self'\n except subprocess.CalledProcessError:\n return False",
"def goal_check(current_node, target_node):\n if current_node.id == target_node.id:\n return True\n else:\n return False",
"def route_is_contained_in_other_route(route,target):\n id_route = 0\n id_target = 0\n found = True\n while found and id_route < len(route) and id_target < len(target):\n found = False\n while not found and id_target < len(target):\n if route[id_route] == target[id_target]:\n found = True\n else:\n id_target += 1\n id_route += 1\n return found",
"def is_consistent_with(self, target):\n same_parent = self.parent() == target.parent()\n # Note FP. Is it really required to have the\n # same parent? Inclusion of all proc may be enough?\n return npw.equal(self.shape, target.shape).all() and same_parent",
"def issubset(target, reference):\n return set(target).issubset(set(reference))",
"def test_is_remote_target(self):\n self.site.mode = SITE_MODE_SOURCE\n self.site.save()\n self.assertEqual(self.project.is_remote(), True)",
"def matches(self, target):\n raise NotImplementedError()",
"def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False",
"def at_target(self):\n return self.location == self.target_location",
"def checkTargets(targets, strings, propagation, output):\n result = \"\"\n #Do not check an URL twice\n #Here, two different pages on the same target can be checked\n #This is because a page can be \"alone\" on a website\n targetViewed = set([])\n for url in targets:\n if url not in targetViewed:\n string, otherLinks, linksViewed = checkSite(url, strings, output)\n result += string\n result += \"\\n\"\n targetViewed = targetViewed | set([url])\n\n #If user want use propagation, add other links to the targets\n if propagation > 0:\n targets += list(otherLinks)\n propagation -= 1\n #Add all viewed links in targetViewed in order to do not check\n #twice the same URL\n targetViewed = targetViewed | linksViewed\n return result",
"def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False",
"def is_target_in(self, newtarget):\n from .utils.shape import HAS_SHAPELY\n # Test if shapely\n if not HAS_SHAPELY:\n print(\"WARNING: could not test if the target is in the image since you do not have SHAPELY\")\n return True\n # Test if WCS \n if not self.has_wcs():\n print(\"WARNING: because there is no wcs solution, \"+\\\n \"I can't test the inclusion of the new astrotarget\")\n return True\n \n return self.wcs.coordsAreInImage(*newtarget.radec)",
"def matches(self, tgt_residence_dir: str) -> bool:"
] | [
"0.59099114",
"0.59013397",
"0.5811146",
"0.57644814",
"0.55539036",
"0.55014306",
"0.5476966",
"0.5355946",
"0.52600724",
"0.52568936",
"0.5252862",
"0.5233927",
"0.5230229",
"0.5219712",
"0.52128905",
"0.52125585",
"0.52089524",
"0.5197318",
"0.5186475",
"0.51463217",
"0.5112654",
"0.5087966",
"0.50844026",
"0.5082801",
"0.5077744",
"0.5069229",
"0.50587183",
"0.50550705",
"0.5041732",
"0.50119776"
] | 0.62854075 | 0 |
rename proteins based on the contents of alternate_names.csv (which has been created from metadata.csv) | def rename_proteins(names_csv):
names_frame = pd.read_csv(names_csv)
for _, row in names_frame.iterrows():
mol_target = row['name']
alternate_name = row['alternate_name']
# Remove the replacement of '_0' - this was inconsistently applied as some folders are '_1'
# The Protein code will be modified to be of format 'xtal_directory:alternate_name'
new_name = str(mol_target).strip() + ':' + str(alternate_name).strip()
prots = Protein.objects.filter(code=mol_target)
for prot in prots:
logger.debug("Changing prot.code to '%s'", new_name)
prot.code = new_name
prot.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def final_rename(understat_no_similar, fpl_no_similar, join = 'inner'): \n name_mapper = {'Adrián':'Adrián Bernabé', # Contains both seasons corrections\n 'Alisson':'Alisson Ramses Becker',\n 'Allan':'Allan Marques Loureiro',\n 'André Gomes':'André Filipe Tavares Gomes',\n 'Angelino':'José Ángel Esmorís Tasende',\n 'Bernard':'Bernard Anício Caldeira Duarte', # Everton\n 'Bernardo Silva':'Bernardo Mota Veiga de Carvalho e Silva', # Manchester City\n 'Bernardo':'Bernardo Fernandes da Silva Junior', # \n 'Borja Bastón':'Borja González Tomás',\n 'Chicharito':'Javier Hernández Balcázar',\n 'David Luiz':'David Luiz Moreira Marinho', \n 'Ederson':'Ederson Santana de Moraes',\n 'Emerson':'Emerson Palmieri dos Santos',\n 'Fabinho':'Fabio Henrique Tavares',\n 'Felipe Anderson':'Felipe Anderson Pereira Gomes',\n 'Fred':'Frederico Rodrigues de Paula Santos', # Manchester United\n 'Hélder Costa': 'Hélder Wander Sousa de Azevedo e Costa', # Leeds\n 'Joelinton':'Joelinton Cássio Apolinário de Lira', # Chelsea\n 'Jonny':'Jonathan Castro Otto', # Wolves\n 'Jorginho':'Jorge Luiz Frello Filho', # Chelsea\n 'Jota':'José Ignacio Peleteiro Romallo',\n 'Kepa':'Kepa Arrizabalaga',\n 'Kiko Femenía':'Francisco Femenía Far',\n 'Lucas Moura':'Lucas Rodrigues Moura da Silva',\n 'Pedro': 'Pedro Rodríguez Ledesma', # Chelsea\n 'Raphinha':'Raphael Dias Belloli',\n 'Ricardo Pereira':'Ricardo Domingos Barbosa Pereira',\n 'Rodri':'Rodrigo Hernandez',\n 'Rúben Dias':'Rúben Santos Gato Alves Dias',\n 'Rúben Vinagre':'Rúben Gonçalo Silva Nascimento Vinagre',\n 'Semi Ajayi':'Oluwasemilogo Adesewo Ibidapo Ajayi',\n 'Trézéguet':'Mahmoud Ahmed Ibrahim Hassan', # Aston Villa\n 'Wesley':'Wesley Moraes',\n 'Willian':'Willian Borges Da Silva',\n }\n understat_no_similar['player_name'] = understat_no_similar['player_name'].map(name_mapper)\n manual_merge = pd.merge(fpl_no_similar, understat_no_similar, left_on=['player_name', 'kickoff_time'],\n right_on=['player_name', 'date'], how=join) # Merge using player name and date of game\n return manual_merge",
"def fix_name(row, index, name_map):\n # print(\"Input row: {}\".format(row))\n name = row[index].strip()\n # print(\"Name entry is {}\".format(name))\n if name.endswith(\" (yourself)\"):\n name = name[:-len(\" (yourself)\")]\n # print(\"Shortening to |{}|\".format(name))\n if name not in name_map:\n name_map[name] = name # Initially the identity transform\n row[index] = name_map[name]",
"def renamefile(filename):\n new_data_list = []\n with open(filename, 'r') as f:\n data_list = f.read().split('\\n')\n\n print('Generating new data list..')\n for data in tqdm(data_list):\n if len(data) == 0:\n continue\n data_info = data.split(' ')\n\n #data_info[0] = data_info[0].replace('jpg', 'png')\n #data_info[1] = data_info[1].replace('jpg', 'png')\n for it, name in enumerate(data_info):\n data_info[it] = '/'.join(name.split('/')[1:])\n if data_info[2].find('extras') == -1:\n new_data_list.append(' '.join(data_info))\n\n with open(filename, 'w') as f:\n print('writing new data names..')\n\n for it, data in tqdm(enumerate(new_data_list)):\n if len(data) == 0:\n continue\n\n if it == len(new_data_list)-1:\n f.write(data)\n else:\n f.write(data+'\\n')\n\n print('Done.')",
"def rename_name_gene(listOfFile, PATH_FASTA_RENAME) :\n\n\tprint \"\\n#################\"\n\tprint \"# Rename protein\"\n\tprint \"#################\\n\"\n\n\tcreate_folder(PATH_FASTA_RENAME)\n\n\tnew_listOfFile=[]\n\n\tfor my_file in listOfFile :\n\t\tif os.stat(my_file).st_size != 0 :\n\t\t\tnew_listOfFile.append(my_file)\n\n\tseq_to_rename = find_rename_fasta(new_listOfFile)\n\tdict_count = dict([(sequence[1:].rstrip(\" \"), 0) for sequence in seq_to_rename])\n\tprogression=1\n\tnumber_of_file = len(new_listOfFile)\n\n\tfor my_file in new_listOfFile :\n\n\t\tfile_name = os.path.basename(my_file)\n\n\t\tsys.stdout.write(\"{:.2f}% : {}/{} files renamed\\r\".format(progression/float(number_of_file)*100, progression,number_of_file))\n\t\tsys.stdout.flush()\n\t\tprogression += 1\n\n\t\thandle = open(os.path.join(PATH_FASTA_RENAME, file_name), 'w')\n\t\tfasta_reading = SeqIO.parse(my_file, \"fasta\")\n\n\t\tfor seq in fasta_reading :\n\t\t\tif seq.id in dict_count :\n\t\t\t\tif dict_count[seq.id] == 0 :\n\t\t\t\t\tdict_count[seq.id] += 1\n\t\t\t\telse :\n\t\t\t\t\tdict_count[seq.id] += 1\n\t\t\t\t\tif \"NC_\" in seq.id :\n\t\t\t\t\t\t# NOTE New name : NC_XXXXXX[_numero de systeme si deux systemes trouvés][_Num(et le nombre de fois nom trouvé)]_nomSysteme_D_nomProteine\n\t\t\t\t\t\tseq.id = \"_\".join(seq.id.split(\"_\")[:2])+\"_Num\"+str(dict_count[seq.id])+\"_\"+\"_\".join(seq.id.split(\"_\")[2:])\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# NOTE New name : NNNN[_numero de systeme si deux systemes trouvés][_Num(et le nombre de fois nom trouvé)]_nomSysteme_V_nomProteine\n\t\t\t\t\t\tseq.id = seq.id.split(\"_\")[0]+\"_Num\"+str(dict_count[seq.id])+\"_\"+\"_\".join(seq.id.split(\"_\")[1:])\n\t\t\t\t\tseq.name = seq.id\n\t\t\t\t\tseq.description = \"\"\n\n\t\t\tSeqIO.write(seq, handle, \"fasta\")\n\n\t\thandle.close()\n\n\tprint\n\tprint \"Done!\"\n\treturn",
"def bulk_rename(current_path,casetype):\n\tclick.echo(current_path)\n\tfilenames = os.listdir(current_path) \n\n\tfor filename in filenames:\n\t\tif filename != 'file_organizer0.03.py':\n\t\t\tif casetype == 'lower':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.lower())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").lower())\n\t\t\telif casetype == 'upper':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.upper())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").upper())\n\t\t\t\t\n\t\t\telif casetype == 'title':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.title)\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").title())\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.lower())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").lower())\n\n\tclick.secho('Finished Renaming to {} case!!'.format(casetype),bg='blue',fg='white')",
"def changeFilenames(speciesfolder, species):\n\tfor filename in os.listdir(speciesfolder):\n\t\tif filename.startswith(\"generic\"):\n\t\t\tnewname = filename.replace(\"generic\", species)\n\t\t\tos.rename(os.path.join(speciesfolder, filename), os.path.join(speciesfolder, newname))",
"def rename(old, new):",
"def rename(old, new):",
"def re_name(name,new_name):\n\n try:\n os.rename(config_tools.full_dest+name,config_tools.full_dest+new_name)\n except OSError:\n print(f\"Не удалось переименовать {name}\")\n else:\n print(f\"{name} успешно переименновавано в {new_name}\")",
"def rename(oldname, newname):",
"def rename_file(fname):\n x,y = load_file(fname)\n date=y[0].split(\".\")\n if len(y[2])<20:\n title=y[2]\n else:\n title=y[2][0:20]\n title=title.replace(\" \",\"_\")\n \n new_name=\"{}{}{}{}.csv\".format(date[2],date[1],date[0],title)\n new_appendix=rename_appendix(y[10],new_name)\n os.rename(fname,new_name)\n replace_line(new_name,10,'Anhang;\"{}\"'.format(new_appendix))\n return new_name",
"def rename_social(file_paths):\n # Set up a counter\n file_counter = 0\n # make a list of failed files\n failed_paths = []\n # for each file path\n for file in file_paths:\n # check if the file is there\n if not os.path.isfile(file):\n failed_paths.append('_'.join(('old', file)))\n continue\n\n # Parse the old file name to check if it is a social experiment or not\n animalnameRegex = re.compile(r'[A-Z][A-Z]_\\d\\d\\d\\d\\d\\d_[a-z]')\n animals = animalnameRegex.findall(file)\n\n if len(animals) > 1:\n # This is a social prey capture test\n first_animal = animals[0]\n # Split the file\n parts = file.split(first_animal)\n # Check to see if previously modified, otherwise make the modification\n if \"social\" in file:\n continue\n else:\n mod = 'social_' + first_animal\n new_path = \"\".join([parts[0], mod, parts[-1]])\n else:\n continue\n\n # check if the new path exists\n if os.path.isfile(new_path):\n failed_paths.append('_'.join(('new', new_path)))\n continue\n # change the file_name\n os.rename(file, new_path)\n # update the counter\n file_counter += 1\n\n print(\"_\".join((\"Total original files: \", str(len(file_paths)), \"Successfully renamed files: \", str(file_counter))))\n return failed_paths",
"def __rename_images(self):\n for idx, image in enumerate(self._values):\n image.partname = '/ppt/media/image%d%s' % (idx+1, image.ext)",
"def renameSample(old_name, new_name):\n def switch(val):\n return new_name.join(val.split(old_name))\n\n with Repo.loadRepo() as repo:\n sample = repo.db.sampleTable.get(old_name)\n print(f'{old_name} -> {new_name}', file=sys.stderr)\n sample.rename(new_name)\n old_file_paths = []\n for result in sample.results():\n new_result_name = switch(result.name)\n result.rename(new_result_name)\n for _, filerec in result.files():\n new_file_name = switch(filerec.name)\n filerec.rename(new_file_name)\n old_file_paths.append(filerec.filepath())\n new_file_path = switch(filerec.filepath())\n filerec.copy(new_file_path)\n filerec.save(modify=True)\n result.save(modify=True)\n sample.save(modify=True)\n\n for old_path in old_file_paths:\n print(old_path)",
"def merge_nonjunk_into_new_name(self, event=None):\n # Delete all original names\n aid_list = self.all_aid_list\n aid_list_filtered = ut.filterfalse_items(\n aid_list, self.ibs.get_annot_isjunk(aid_list)\n )\n # Rename annotations\n self.ibs.set_annot_names_to_same_new_name(aid_list_filtered)\n self.update_callback()\n self.backend_callback()\n self.show_page()",
"def _change_name(self, suff, info_extra):\n if 'cable-ring' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n f = i1 / i2\n suff = suff.replace('.png',\n f'-area-{i1:0.3f}-best-{i2:0.3f}-FRAC-{f:0.3f}.png')\n elif 'cloth-flat' in self.path:\n i1 = info_extra['cloth_coverage']\n suff = suff.replace('.png', f'-coverage-{i1:0.3f}.png')\n elif 'bag-alone' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n suff = suff.replace('.png', f'-area-{i1:0.3f}-best-{i2:0.3f}.png')\n else:\n pass\n return suff",
"def map_similar_names(similarity_matched_df, understat_not_matched, fpl_not_matched, season): \n if season == '2020-21' or '2021-22':\n wrongly_matched_names = ['Adrián', 'Alisson', 'Allan', 'André Gomes', 'Bernard', 'Bernardo', 'Bernardo Silva', 'David Luiz', 'Ederson', 'Emerson', \n 'Fabinho', 'Felipe Anderson', 'Fred', 'Hélder Costa', 'Joelinton', 'Jonny', 'Jorginho', 'Kepa', 'Lucas Moura', 'Raphinha', \n 'Ricardo Pereira', 'Rodri', 'Rúben Dias','Rúben Vinagre', 'Semi Ajayi', 'Trézéguet', 'Wesley', 'Willian'] \n if season == '2019-20':\n wrongly_matched_names = ['Adrián','Alisson','André Gomes','Angelino', 'Bernard', 'Bernardo', 'Bernardo Silva','Borja Bastón', \n 'Chicharito','David Luiz','Ederson', 'Emerson', 'Fabinho', 'Felipe Anderson', 'Fred','Joelinton', 'Jonny',\n 'Jorginho','Jota', 'Kepa','Kiko Femenía','Pedro', 'Ricardo Pereira', 'Rodri','Rúben Vinagre','Trézéguet','Wesley','Willian']\n\n \n \n similar_rename = similarity_matched_df[~similarity_matched_df['understat'].isin(wrongly_matched_names)] # Subset Similar: Similar match\n # no_similar_rename = similarity_matched_df[similarity_matched_df['understat'].isin(wrongly_matched_names)] # Subset Similar: Similar match\n # print(similar_rename.to_latex())\n # print(no_similar_rename.to_latex())\n understat_no_similar = understat_not_matched[understat_not_matched['player_name'].isin(wrongly_matched_names)] # Subset Understat: No similar match\n understat_similar = understat_not_matched[~understat_not_matched['player_name'].isin(wrongly_matched_names)] # Subset Understat: Similar match\n\n fpl_similar = fpl_not_matched[fpl_not_matched['player_name'].isin(similar_rename['fpl'].unique())] # Subset FPL: Similar match\n fpl_no_similar = fpl_not_matched[~fpl_not_matched['player_name'].isin(similar_rename['fpl'].unique())] # Subset FPL: No similar match\n \n name_mapper = dict(zip(similar_rename['understat'], similar_rename['fpl'])) \n understat_similar['player_name'] = understat_similar['player_name'].map(name_mapper) # Renames similarly matched names\n return understat_no_similar, understat_similar, fpl_similar, fpl_no_similar",
"def rename_meta(meta, mapper, ignore_batch_props):\n rename_properties(mapper)\n rename_lib_values(meta['lib']['values'], mapper)\n rename_masks(meta['masks'], mapper, keep_original)\n rename_columns(meta['columns'], mapper, keep_original)\n rename_sets(meta['sets'], mapper, keep_original)\n if 'batches' in meta['sets'] and not ignore_batch_props:\n rename_batch_properties(meta['sets']['batches'], mapper)\n if not keep_original:\n rename_set_items(meta['sets'], mapper)",
"def _rename(name,rename):\n for k in rename.keys():\n if k==name:\n name=rename[k]\n return name",
"def update2(snippet_original, filename, change):\n## THIS REPLACES, NOT APPENDS\n\tlogging.info(\"Reading {} from {}\".format(snippet_original, filename))\n\tlogging.debug(\"Opening file\")\n\twith open(filename, \"r+\") as f:\n\t\treader = csv.reader(f)\n\t\tlogging.debug(\"Reading name/snippet from file\")\n\t\tin_file = False\n\t\tmydict = {}\n\t\tfor row in reader:\n\t\t\tif snippet_original != str(row[1]):\n\t\t\t\tmydict.update({row[0]: row[1]})\n\t\t\telse:\n\t\t\t\tmydict.update({row[0]: change})\n\t\tprint mydict.keys()\n\twith open(filename, \"w\") as f:\n\t\twriter = csv.writer(f)\n\t\tfor key in mydict:\n\t\t\twriter.writerow([str(key),str(mydict[key])])\n\tlogging.debug(\"Read successful\")\n\treturn snippet_original, filename",
"def update_name(name, mapping):\n words_name = name.split(\" \")\n if words_name not in expected:\n for word in words_name:\n if word in mapping:\n name = name.replace(word, mapping[word])\n \n if word == word.lower():\n if word not in allowed_lowercase:\n name = name.replace(word, word.capitalize())\n \n if words_name[0] not in expected:\n if words_name[0] not in mapping:\n if words_name[0] == \"Fernando\":\n name = \"Avenida \" + name\n elif words_name[0] == \"rua\":\n pass\n else:\n name = \"Rua \" + name\n\n return name",
"def rename_file(source, oldname, newname):\n #source = client_variables.output_folder\n renamefiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for renamefile in renamefiles:\n if renamefile.endswith(ext):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)\n elif renamefile.startswith(oldname):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)",
"def playlist_rename(playlists):\n # Deal with old playlist names that permitted spaces\n a, b = \"\", playlists.split(\" \")\n while a not in g.userpl:\n a = (a + \" \" + (b.pop(0))).strip()\n if not b and a not in g.userpl:\n g.message = F('no pl match for rename')\n g.content = g.content or playlists_display()\n return\n\n b = \"-\".join(b)\n g.userpl[b] = Playlist(b)\n g.userpl[b].songs = list(g.userpl[a].songs)\n playlist_remove(a)\n g.message = F('pl renamed') % (a, b)\n save_to_file()",
"def change_to_video_name(csv_name, suffix):\n return csv_name[:-10]+\".\"+suffix",
"def rename_presets(self, preset_ids, new_ids, REQUEST=None):\r\n\r\n raise NotImplementedError",
"def changeFilenames(reduced, path, input):\n file = path + '/' + input\n data = open(file, 'r').readlines()\n os.remove(file)\n\n fh = open(file, 'w')\n for line in data:\n if 'includegraphics' in line:\n for key in reduced:\n if key in line:\n new = line.replace(key, reduced[key])\n fh.write(new)\n\n logging.debug('Changed {0:>s} to {1:>s} '.format(line, new))\n else:\n fh.write(line)\n\n fh.close()",
"def ChangeName(self, newName):\n if newName != \"\":\n newPath = self.format + os.sep + \"playlists\" + os.sep + newName + \".txt\"\n os.replace(self.path, newPath)\n self.path = newPath",
"def rewrite_names_map(names_path, name_map):\n with open(names_path, 'w', newline=\"\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([\"As entered\", \"Normalized\"])\n names = sorted(name_map.keys())\n for name in names:\n writer.writerow([ name, name_map[name] ])\n return",
"def rename_photcat(filt, origin='', revert=True):\n if revert == False:\n os.rename(origin+filt+\"_photcat.dat\", origin+filt+\\\n \"_photcat.store.dat\")\n \n if revert == True:\n os.rename(origin+filt+\"_photcat.store.dat\", origin+filt+\\\n \"_photcat.dat\")",
"def rename(ctx, input_file, output_file):\n ctx.ensure_object(dict)\n ctx.obj[\"reader\"] = PFBReader(input_file)\n ctx.obj[\"writer\"] = PFBWriter(output_file)"
] | [
"0.6262149",
"0.620358",
"0.59225214",
"0.58795476",
"0.5855679",
"0.5780732",
"0.57562983",
"0.57562983",
"0.5748717",
"0.56989646",
"0.56971043",
"0.56632984",
"0.5603336",
"0.5593263",
"0.55612004",
"0.5544447",
"0.55431473",
"0.5540265",
"0.5526973",
"0.55224794",
"0.55170316",
"0.55048776",
"0.54909384",
"0.54845345",
"0.5480306",
"0.54711956",
"0.54657984",
"0.5439303",
"0.53973377",
"0.5386406"
] | 0.80773854 | 0 |
Calculate a relative path from the file path to the media root | def relative_to_media_root(filepath, media_root=settings.MEDIA_ROOT):
relative_path = os.path.relpath(filepath, media_root)
return relative_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_media_path(self, filename):\n return join(settings.CMS_PAGE_MEDIA_PATH, \"%d\" % self.id, filename)",
"def media_path(self):\n return self._path",
"def get_relative_pathname(self):\n return os.path.join(Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)",
"def file_path(self, request, response=None, info=None):\n url = request.url\n media_guid = hashlib.sha1(to_bytes(url)).hexdigest()\n media_ext = os.path.splitext(url)[1]\n if not media_ext.isalnum():\n media_ext = os.path.splitext(urlparse(url).path)[1]\n return \"full/%s%s\" % (media_guid, media_ext)",
"def get_relative_path(self, file_path):\n file_path = os.path.abspath(file_path)\n if self.base_dir is not None:\n file_path = file_path.replace(os.path.abspath(self.base_dir), \"\")\n assert file_path[0] == \"/\"\n file_path = file_path[1:]\n return file_path",
"def _GetRelPath(self, filename):\n assert filename.startswith(self.subdir), (filename, self.subdir)\n return filename[len(self.subdir):].lstrip(r\"\\/\")",
"def get_file_path(filename):\n if 'http' in filename:\n parsed_uri = urlparse(filename)\n f = '/' + parsed_uri.path[1:]\n f = '/'.join(f.split('/')[3:]) # split the xxx dir, remove the leading /\n else:\n filename = ('/' + filename) if filename[0] != '/' else filename # make sure starts with /\n # split local img path from path\n f = filename.replace(settings.FILE_PATH, '/')\n f = f.replace(settings.IMAGE_PATH, '/')\n f = f.replace(settings.DERIVED_PATH, '/')\n f = '/'.join(f.split('/')[2:]) # split the xxx dir, remove the leading /\n\n return f",
"def root_rel_path(self):\n return os.path.dirname(self.image.name)",
"def full_path(self):\n return os.path.join(settings.MEDIA_ROOT, self.path)",
"def compute_path(file: mesonlib.FileOrString) -> str:\n if isinstance(file, File):\n return file.absolute_path(self.source_dir, self.build_dir)\n return os.path.normpath(os.path.join(self.build_dir, file))",
"def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)",
"def file_path(file_name, path):\n return path.rstrip('\\/') + \"/{0}\".format(file_name) if path else os.getcwd() + \"/{0}\".format(file_name)",
"def _fix_path(self, fil_path):\n\n return fil_path\n wav_folder = '/wav_sounds/'\n ex_path = os.path.realpath(__file__)\n ex_path = '/'.join(ex_path.split('/')[:-3]) + wav_folder + fil_path\n\n return ex_path",
"def get_absolute_pathname(self):\n return os.path.join(settings.PRIVATE_STORAGE_ROOT, self.get_relative_pathname())",
"def get_path_relative_to_http_root(file_path):\n return os.path.relpath(file_path, get_http_path_prefix())",
"def get_abs_path(file_path, relative_path):\n import os\n dir_path = os.path.dirname(file_path)\n abs_path = os.path.join(dir_path, relative_path)\n return abs_path",
"def relative_path(filename):\n length = len(os.path.abspath(DOC_BUILD_DIR)) + 1\n return os.path.abspath(filename)[length:]",
"def get_actual_path(self, path):\n if self._params.path_to_dir[-1] != '/':\n if path:\n path = self._params.path_to_dir + '/' + path\n path = path.replace('//', '/')\n return path",
"def upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,self.upload_dir_rel())",
"def relative(self, path):\n return re.sub(self.path_regex, '', path).lstrip(os.sep)",
"def path_media(self) -> Path:\n return self.path_supervisor / MEDIA_DATA",
"def absolute_folder_name(self):\n return 'music_decompose/media/{0}'.format(self.media_folder_name)",
"def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result",
"def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'",
"def media_path_to_url(path):\n media_url = settings.MEDIA_URL\n if media_url.endswith('/'):\n media_url = media_url[:-1]\n return path.replace(settings.MEDIA_ROOT, media_url)",
"def to_file_path(self, resourcePath: str) -> PurePath:\n rel = resourcePath.replace('res://', '')\n return self._root.joinpath(rel)",
"def path(self) -> str:\n return self.src + \"/\"",
"def _get_relative_path(self, abs_path):\r\n relative_path = os.path.relpath(abs_path, settings.PROJECT_ROOT)\r\n return relative_path",
"def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)",
"def file_path(self):\n return posixpath.dirname(self.file_name)"
] | [
"0.716648",
"0.7072237",
"0.7070012",
"0.70034564",
"0.6960707",
"0.69439507",
"0.694342",
"0.6929604",
"0.6818353",
"0.67295825",
"0.67267865",
"0.67257935",
"0.6722937",
"0.6715708",
"0.6683894",
"0.66819495",
"0.66751665",
"0.6658907",
"0.66423994",
"0.66352",
"0.6609155",
"0.65858406",
"0.6574621",
"0.655817",
"0.6547144",
"0.65374875",
"0.6525956",
"0.64937997",
"0.64914185",
"0.6483602"
] | 0.8047508 | 0 |
Analyse all the molecules for a particular target. | def analyse_target(target, aligned_path):
# Get Target from database
target.root_data_directory = relative_to_media_root(aligned_path)
target.save()
mols = list(Molecule.objects.filter(prot_id__target_id=target))
# This can probably be improved to count molecules as they are processed when the code is further refactored
mols_processed = len(mols)
logger.debug("Analysing '%s' molecules for '%s'", len(mols), target.title)
# Do site mapping
if os.path.isfile(os.path.join(aligned_path, 'metadata.csv')):
target.metadata.save(
os.path.basename(os.path.join(aligned_path, 'metadata.csv')),
File(open(os.path.join(aligned_path, 'metadata.csv')))
)
# remove any existing files so that we don't create a messy file when appending
if os.path.isfile(os.path.join(aligned_path, 'hits_ids.csv')):
os.remove(os.path.join(aligned_path, 'hits_ids.csv'))
if os.path.isfile(os.path.join(aligned_path, 'sites.csv')):
os.remove(os.path.join(aligned_path, 'sites.csv'))
if os.path.isfile(os.path.join(aligned_path, 'alternate_names.csv')):
os.remove(os.path.join(aligned_path, 'alternate_names.csv'))
new_frame = pd.read_csv(os.path.join(aligned_path, 'metadata.csv'))
new_frame.sort_values(by='site_name', inplace=True)
# one file for new names
with open(os.path.join(aligned_path, 'alternate_names.csv'), 'a', encoding='utf-8') as f:
f.write('name,alternate_name\n')
for _, row in new_frame.iterrows():
if isinstance(row['alternate_name'], str):
crystal_name = row['crystal_name']
# find the correct crystal
crystal = Protein.objects.filter(code__contains=crystal_name, target_id=target)
alternate_name = row['alternate_name']
# Only take first part of code
for crys in list(set([c.code.split(":")[0] for c in crystal])):
f.write(str(crys) + ',' + str(alternate_name) + '\n')
# hits and sites files
site_mapping = {}
unique_sites = list(set(list(new_frame['site_name'])))
for i in range(0, len(sorted(unique_sites))):
site_mapping[unique_sites[i]] = i
with open(os.path.join(aligned_path, 'hits_ids.csv'), 'a', encoding='utf-8') as f:
f.write('crystal_id,site_number\n')
for _, row in new_frame.iterrows():
crystal_name = row['crystal_name']
crystal = Protein.objects.filter(code__contains=crystal_name, target_id=target)
site = row['site_name']
s_id = site_mapping[site]
for crys in list(set([c.code for c in crystal])):
f.write(str(crys) + ',' + str(s_id) + '\n')
with open(os.path.join(aligned_path, 'sites.csv'), 'a', encoding='utf-8') as f:
f.write('site,id\n')
for key in site_mapping.keys():
f.write(str(key) + ',' + str(site_mapping[key]) + '\n')
if os.path.isfile(os.path.join(aligned_path, 'hits_ids.csv')) and os.path.isfile(
os.path.join(aligned_path, 'sites.csv')):
hits_sites = pd.read_csv(os.path.join(aligned_path, 'hits_ids.csv'))
sites = pd.read_csv(os.path.join(aligned_path, 'sites.csv'))
sites.sort_values(by='site', inplace=True)
# Delete the old mol_groups
# Note that this will not delete associated MoleculeTags - the tags
# will have their mol_group set to Null, but be left on the
# database so that any existing Tags will not be broken.
mol_groups = MolGroup.objects.filter(target_id=target)
for m in mol_groups:
m.delete()
for _, row in sites.iterrows():
description = row['site']
number = row['id']
logger.debug('Processing user input site: %s', description)
matches = []
for _, row in hits_sites.iterrows():
if str(row['site_number']) == str(number):
matches.append(row['crystal_id'])
logger.debug('HIT IDS: %s', matches)
if matches:
mols = list(Molecule.objects.filter(prot_id__target_id=target, prot_id__code__in=matches))
analyse_mols(mols=mols, target=target, specified_site=True, site_description=description)
if os.path.isfile(os.path.join(aligned_path, 'alternate_names.csv')):
rename_proteins(names_csv=os.path.join(aligned_path, 'alternate_names.csv'))
else:
analyse_mols(mols=mols, target=target)
# move anything that's not a directory in 'aligned' up a level
files = (f for f in os.listdir(aligned_path)
if os.path.isfile(os.path.join(aligned_path, f)))
for f in files:
shutil.move(os.path.join(aligned_path, f), os.path.join(aligned_path, f).replace('aligned', ''))
# delete NEW_DATA VISITS PROPOSALS. These are not used by the new loader but might be in
# old data sets.
to_delete = ['NEW_DATA', 'VISITS', 'PROPOSALS']
for file in to_delete:
filepath = os.path.join(aligned_path.replace('aligned', ''), file)
if os.path.isfile(filepath):
os.remove(filepath)
# last step - zip up the input file and move it to the archive
zipped = shutil.make_archive(aligned_path.replace('aligned', ''), 'zip', aligned_path.replace('aligned', ''))
target.zip_archive.name = relative_to_media_root(zipped)
target.save()
return mols_processed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def analyse_mols(mols, target, specified_site=False, site_description=None):\n rd_mols = [Chem.MolFromMolBlock(x.sdf_info) for x in mols]\n if not specified_site:\n cluster_mols(rd_mols, mols, target)\n else:\n specifc_site(rd_mols, mols, target, site_description)\n\n get_vectors(mols)",
"def analyze(self, options, target):\r\n\r\n target = 0\r\n\r\n upf = None\r\n\r\n dwnf = None\r\n\r\n if options.upfile is not None:\r\n\r\n upf = basepath + options.upfile + '.ma'\r\n\r\n if options.downfile is not None:\r\n\r\n dwnf = basepath + options.downfile + '.ma'\r\n\r\n\r\n\r\n for filename in (upf, dwnf):\r\n\r\n # if options.upfile is not None and options.downfile is not None:\r\n\r\n if filename is None:\r\n\r\n break\r\n\r\n im=[]\r\n\r\n self.imageData = []\r\n\r\n print (\"Loading data from %s\" % filename)\r\n\r\n try:\r\n\r\n im = MetaArray(file = filename, subset=(slice(0,2), slice(64,128), slice(64,128)))\r\n\r\n except:\r\n\r\n print(' Error loading upfile: %s' % filename)\r\n\r\n return\r\n\r\n print(' Data loaded')\r\n\r\n target = target + 1\r\n\r\n self.times = im.axisValues('Time').astype('float32')\r\n\r\n self.imageData = im.view(np.ndarray).astype('float32')\r\n\r\n im=[]\r\n\r\n self.analysis_fourier_map(period=self.period, target=target, bins=binsize,)\r\n\r\n if target > 0:\r\n\r\n self.plot_maps(mode = 1, target = target, gfilter = self.gfilter)",
"def analyze(self, event):\n electrons = self.inputCollection(event)\n muons = Collection(event, \"Muon\")\n triggerObjects = self.triggerObjectCollection(event)\n\n selectedElectrons = []\n unselectedElectrons = []\n \n weight_reco_nominal = 1.\n weight_reco_up = 1.\n weight_reco_down = 1.\n\n weight_id_nominal = 1.\n weight_id_up = 1.\n weight_id_down = 1.\n\n for electron in electrons:\n # https://twiki.cern.ch/twiki/bin/view/CMS/CutBasedElectronIdentificationRun2\n if electron.pt>self.electronMinPt \\\n and math.fabs(electron.eta)<self.electronMaxEta \\\n and self.electronID(electron)\\\n and self.triggerMatched(electron, triggerObjects):\n\n dxy = math.fabs(electron.dxy)\n dz = math.fabs(electron.dz)\n \n if math.fabs(electron.eta) < 1.479 and (dxy>0.05 or dz>0.10):\n unselectedElectrons.append(electron)\n continue\n elif dxy>0.10 or dz>0.20:\n unselectedElectrons.append(electron)\n continue\n\n #reject electron if close-by muon\n if len(muons)>0:\n mindr = min(map(lambda muon: deltaR(muon, electron), muons))\n if mindr < 0.05:\n unselectedElectrons.append(electron)\n continue\n\n selectedElectrons.append(electron)\n \n #TODO: electron reco/ID SFs\n \n \n else:\n unselectedElectrons.append(electron)\n\n \n if not Module.globalOptions[\"isData\"] and self.storeWeights:\n \n self.out.fillBranch(self.outputName+\"_weight_reco_nominal\", weight_reco_nominal)\n self.out.fillBranch(self.outputName+\"_weight_reco_up\", weight_reco_up)\n self.out.fillBranch(self.outputName+\"_weight_reco_down\", weight_reco_down)\n\n self.out.fillBranch(self.outputName+\"_weight_id_nominal\",weight_id_nominal)\n self.out.fillBranch(self.outputName+\"_weight_id_up\",weight_id_up)\n self.out.fillBranch(self.outputName+\"_weight_id_down\",weight_id_down)\n\n self.out.fillBranch(\"n\"+self.outputName,len(selectedElectrons))\n\n for variable in self.storeKinematics:\n self.out.fillBranch(self.outputName+\"_\"+variable,map(lambda electron: getattr(electron,variable), selectedElectrons))\n\n setattr(event,self.outputName,selectedElectrons)\n setattr(event,self.outputName+\"_unselected\",unselectedElectrons)\n\n return True",
"def analyse(self):\n pass",
"def update_infos_by_target_analyte(self, transfer_batches):\n for target, transfers in self.group_transfers_by_target_analyte(transfer_batches).items():\n # TODO: The `is_pooled` check is a quick-fix.\n if target.is_pool and self.dilution_settings.is_pooled:\n regular_transfers = [t for t in transfers if not t.source_location.artifact.is_control]\n source_vol_delta = list(set(t.source_vol_delta for t in regular_transfers\n if t.should_update_source_vol))\n # We assume the same delta for all samples in the pool:\n source_vol_delta = utils.single(source_vol_delta)\n # We also assume the same conc for all (or all None)\n target_conc = utils.single(list(set(t.target_conc for t in regular_transfers)))\n target_vol = utils.single(list(set(t.target_vol for t in regular_transfers)))\n yield target, [UpdateInfo(target_conc, target_vol, source_vol_delta)]\n else:\n yield target, [t.update_info for t in transfers]",
"def run_once(self):\n # Track some statistics about artifacts in a summary object.\n summary = collections.Counter()\n\n for source in self.sources:\n # Run the source to collect artifacts.\n self.logger.info(f\"Running source '{source}'\")\n try:\n # get the generator of onions\n onions = self.sources[source].run()\n except Exception as e:\n self.logger.error(e)\n self.logger.error(traceback.print_exc())\n continue\n\n # Process onions with each operator.\n for operator in self.operators:\n self.logger.info(f\"Processing found onions with operator '{operator}'\")\n try:\n self.operators[operator].process(onions)\n # Save the source onion with collected data\n except Exception as e:\n self.logger.error(e)\n self.logger.error(traceback.print_exc())\n continue\n\n\n\n# # Record stats and update the summary.\n# types = artifact_types(doc.get('interestingKeywords'))\n# summary.update(types)\n# for artifact_type in types:\n# self.logger.info(f'types[artifact_type]')\n\n # Log the summary.\n self.logger.info(f\"New artifacts: {dict(summary)}\")",
"def collectTargets(self, output):\n pass",
"def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table",
"def count_target(self):\n tally = {}\n for obj in self.target:\n tally[obj] = 0\n\n ind = 0\n for label in self.labelList:\n filename = self.pathLabel + label\n f = open(filename, 'r')\n content = f.read().split('\\n')\n for line in content:\n items = line.split(' ')\n if items[0] in self.target:\n tally[items[0]] += 1\n f.close()\n if ind % 100 == 0:\n print(f'[COUNT] {ind} of {len(self.labelList)} processed')\n ind += 1\n \n print('[COUNT] done counting targets in dataset')\n print(tally)",
"def si(target: QubitSetInput) -> Iterable[Instruction]:\n return [Instruction(Si(), target=qubit) for qubit in QubitSet(target)]",
"def findInteractions( targetGenes, geneTable ):\n pass",
"def process_target(self):\n assert isinstance(self.target, (list, tuple))\n return super().process_target()",
"def process_target(self):\n assert isinstance(self.target, (list, tuple))\n return super().process_target()",
"def analyzeAll(self, program: ghidra.program.model.listing.Program) -> None:\n ...",
"def h(target: QubitSetInput) -> Iterable[Instruction]:\n return [Instruction(H(), target=qubit) for qubit in QubitSet(target)]",
"def vi(target: QubitSetInput) -> Iterable[Instruction]:\n return [Instruction(Vi(), target=qubit) for qubit in QubitSet(target)]",
"def select_for_target(self, target):\n\n return [x for x in self.objects if x.target == target]",
"def execute(self, targets):",
"def analyse_results(self, \n target_dir, \n param_file = \"TRAINED_PARAMS_END.model\",\n w_norm_file = \"W_NORMS.dat\",\n num_to_test = 100,\n get_means_from = {\n 'NOISY' :'RECONST_NOISY_ERRORS.dat',\n 'MISSING':'RECONST_MISSING_ERRORS.dat'\n },\n given_inputs = []):\n \n path_to_json = os.path.join(target_dir, \"PARAMETERS.json\")\n \n with open(path_to_json, 'r') as json_file:\n \n param_dict = json.load(json_file)\n \n self.num_hidden = param_dict['GLOBAL']['num_hidden']\n \n num_runs = param_dict['GLOBAL']['num_runs']\n \n path_to_mix_params = os.path.join(target_dir, \"MIX_PARAMS.dat\")\n \n reg_dict = {}\n \n for f_name in param_dict.keys():\n \n if f_name != \"GLOBAL\":\n reg_dict[param_dict[f_name]['algorithm']] =\\\n param_dict[f_name]['regressor']\n \n self.mixture = False\n \n E_gaps = {}\n \n recon_errors = {}\n \n p_tilda_data = {}\n \n end_w_norms = {}\n \n for field in get_means_from.keys():\n \n recon_errors[field] = {}\n \n if os.path.exists(path_to_mix_params):\n \n self.mixture = True\n \n self.mix_params = np.loadtxt(path_to_mix_params)\n \n for run_ind in range(num_runs):\n \n sub_f1 = \"run%s\"%run_ind\n \n sub_dir = os.path.join(target_dir, sub_f1)\n \n if os.path.isdir(sub_dir): \n print(\"Processing %s\"%sub_dir)\n tr_file = \"TRAIN_IMAGES.dat\"\n check_input_file = os.path.join(sub_dir, tr_file)\n \n if os.path.exists(check_input_file):\n inputs = np.loadtxt(check_input_file)\n # to make fair comparison, number of is samples\n # is set to the number of test inputs\n self.num_samples = inputs.shape[0]\n else:\n \n if isinstance(given_inputs, np.ndarray):\n N = given_inputs.shape[0]\n inds = self.np_rand_gen.choice(N, \n num_to_test,\n replace = False)\n \n inputs = given_inputs[inds,:]\n \n self.num_samples = num_to_test\n \n else:\n print(\"Error: %s does not contain file %s\"\n %(sub_dir2, tr_file) +\" and given_inputs is []\")\n print(\"Script execution will terminate\") \n sys.exit()\n \n self.batch_size = inputs.shape[0]\n \n if self.mixture:\n self.set_mixture_means(inputs = inputs)\n \n is_samples, _ = self.is_sampler()\n \n for sub_f2 in os.listdir(sub_dir):\n \n sub_dir2 = os.path.join(sub_dir, sub_f2)\n \n if os.path.isdir(sub_dir2):\n \n if \"_\" in sub_f2:\n spl_str = sub_f2.split(\"_\")\n if len(spl_str) > 3:\n algorithm = spl_str[0]+\"_\"+spl_str[1]\n else:\n algorithm = spl_str[0]\n \n else:\n algorithm = sub_f2\n \n field_name =algorithm \n \n if reg_dict[algorithm] != None:\n if reg_dict[algorithm] in sub_f2:\n if \"_\" in reg_dict[algorithm]:\n reg_name_s = reg_dict[algorithm].split(\"_\")\n \n reg_name_s = reg_name_s[0][0].upper() +\\\n reg_name_s[1][0].upper()\n field_name +=\" %s\"%reg_name_s\n else:\n field_name +=\" %s\"%reg_dict[algorithm]\n \n get_val = sub_f2\n get_val = get_val.split(reg_dict[algorithm])[1]\n \n field_name +=\" %s\"%get_val\n \n if field_name not in E_gaps.keys():\n \n E_gaps[field_name] = []\n \n if field_name not in p_tilda_data.keys():\n \n p_tilda_data[field_name] = []\n \n if field_name not in end_w_norms.keys():\n \n end_w_norms[field_name] = []\n \n par_path = os.path.join(sub_dir2, param_file)\n \n w_norms_path = os.path.join(sub_dir2, w_norm_file)\n \n w_norms = np.loadtxt(w_norms_path)\n \n end_w_norms[field_name].append(w_norms[-1])\n \n if os.path.exists(par_path):\n \n css_diff, p_tilda_vals =\\\n self.compare_css_terms(x_inputs = inputs,\n x_samples = is_samples,\n full_path = par_path)\n \n E_gaps[field_name].append(css_diff)\n \n mean_val = np.mean(p_tilda_vals[0:self.batch_size])\n \n 
p_tilda_data[field_name].append(mean_val)\n \n else:\n \n print(\"Error: %s does not exist\"%par_path)\n sys.exit()\n \n for f_exp in get_means_from.keys():\n \n file_name = get_means_from[f_exp]\n \n check_err_file = os.path.join(sub_dir2, file_name)\n \n errors = np.loadtxt(check_err_file)\n \n mean_val = np.mean(errors)\n \n if field_name not in recon_errors[f_exp].keys():\n \n recon_errors[f_exp][field_name] = []\n \n recon_errors[f_exp][field_name].append(mean_val)\n \n return E_gaps, recon_errors, p_tilda_data, end_w_norms",
"def i(target: QubitSetInput) -> Iterable[Instruction]:\n return [Instruction(I(), target=qubit) for qubit in QubitSet(target)]",
"def present_target(target):\n print_heading(\"Analysing\")\n writer(target)",
"def get_targets(self):\n\t\n\t\tself.target = []\n\t\ttarget_ins = self.settings['target']\n\t\tfor key in target_ins.keys():\n\t\t\tif key == 'raw':\n\t\t\t\tself.target.append(target_ins[key])\n\t\t\telif key == 'textfile':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,_].+\\s*:\\s*[A-Z].+$',t):\n\t\t\t\t\t\tself.target.append(tuple([i.strip() for i in t.split(':')]))\n\t\t\telif key == 'textfile_rna':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,0-9,_].+\\s*:\\s*[A-Z,a-z].+$',t):\n\t\t\t\t\t\tself.target.append(list([i.strip() for i in t.split(':')]))\n\t\t\t\t\t\trnaseq = self.target[-1][1]\n\t\t\t\t\t\t#---extra substitutions for later\n\t\t\t\t\t\tif 'regex_subs' in self.settings.keys():\n\t\t\t\t\t\t\tfor regex in self.settings['regex_subs']:\n\t\t\t\t\t\t\t\trnaseq = re.sub(regex[0],regex[1],rnaseq)\n\t\t\t\t\t\trnaseq = rnaseq.upper()\n\t\t\t\t\t\trnaseq = re.sub('T','U',rnaseq)\n\t\t\t\t\t\taminoseq = ''.join([dna_mapping[i] for i in [rnaseq[i:i+3] \n\t\t\t\t\t\t\tfor i in range(0,len(rnaseq),3)]])\n\t\t\t\t\t\tself.target[-1][1] = re.sub('T','U',aminoseq)\n\t\t\t\t\t\tself.target[-1] = tuple(self.target[-1])\n\t\t\telse: raise Exception('except: unclear target type')",
"def list(self, request, *args, **kwargs):\n\n # get minions per target\n tid = self.request.QUERY_PARAMS.get(\"tid\", None)\n\n # get minions per target\n qtid = self.request.QUERY_PARAMS.get(\"qtid\", False)\n\n if tid:\n try:\n target_obj = Target.objects.get(id=tid)\n except:\n return Response(dict(error=[\"target_doesnot_exists\"], data={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n minions = [obj.minion for obj in target_obj\\\n .targetminions_set.all()]\n\n elif qtid:\n try:\n target_obj = Target.objects.get(is_quick_target=True,\n target_name=\"\",\n system_folder=None)\n except:\n return Response(dict(error=[\"qt_doesnot_exists\"], data={}),\n status=status.HTTP_400_BAD_REQUEST)\n minions = [obj.minion for obj in target_obj\\\n .targetminions_set.all()]\n\n else:\n # get total minion objects\n minions = Minion.objects.all()\n\n # get minion connections statistics\n minion_conn_stats = self.minion_connection_stats(minions)\n # get minion key statistics\n minion_key_stats = self.minion_key_stats(minions)\n # get minion os statistics\n minion_os_stats = self.minion_os_stats(minions)\n # get minion version statistics\n minion_version_stats = self.minion_version_stats(minions)\n\n return Response(dict(data=dict(minion_conn_stats=minion_conn_stats,\n minion_key_stats=minion_key_stats,\n minion_os_stats=minion_os_stats,\n minion_version_stats=minion_version_stats,\n total_minions=len(minions)),\n error=[]), status=status.HTTP_200_OK)",
"def copy_all_possible_origins(cls, annotated: Chem.Mol, target: Chem.Mol) -> Tuple[List[Chem.Mol], List[List[int]]]:\n mcs = rdFMCS.FindMCS([target, annotated],\n atomCompare=rdFMCS.AtomCompare.CompareElements,\n bondCompare=rdFMCS.BondCompare.CompareAny,\n ringMatchesRingOnly=True)\n common = Chem.MolFromSmarts(mcs.smartsString)\n options = []\n originss = []\n for target_match in target.GetSubstructMatches(common):\n for anno_match in annotated.GetSubstructMatches(common):\n dmapping = dict(zip(target_match, anno_match))\n origins = []\n option = Chem.Mol(target)\n for i in range(option.GetNumAtoms()):\n if i in dmapping:\n atom = annotated.GetAtomWithIdx(dmapping[i])\n tatom = option.GetAtomWithIdx(i)\n o = cls._get_origin(atom)\n tatom.SetProp('_Origin', json.dumps(o))\n xyz = cls._get_xyz(atom)\n if xyz:\n cls._set_xyz(tatom, xyz)\n options.append(option)\n originss.append(origins)\n return options, originss",
"def target(self, receptors_to_target: list[str]) -> None:\n self._assert_receptor_input_is_valid(receptors_to_target)\n self._target = [\n r for r in self.observer.photoreceptors if r in receptors_to_target\n ]",
"def ti(target: QubitSetInput) -> Iterable[Instruction]:\n return [Instruction(Ti(), target=qubit) for qubit in QubitSet(target)]",
"def analyze(self):\n # turn off all indicator lights\n self._stop_all()\n \n # run, but catch exceptions and abort if necessary\n try:\n # setup\n self.analysis_led[1].blink\n ims_left = self.num_images\n fluid_left = True\n \n data_session = Data(self.data_path)\n \n # run motor & imaging\n while self.power.update() and ims_left > 0:\n # run pump\n self.motor.run(self.pump_runtime)\n \n if not self.power.update():\n break\n \n # image\n time.sleep(self.rest_time)\n self.cam_led.on\n self.camera.capture()\n data_session.fetch_data()\n self.cam_led.off\n \n # subtract from remaining images every cycle\n # if the fluid sensor turns off, set remaining\n # images to the maximum possible remaining\n ims_left -= 1\n if fluid_left and \\\n not self.fluid.update() and \\\n ims_left > self.samps_after_sensor_off:\n fluid_left = False\n ims_left = self.samps_after_sensor_off\n \n # change indicator lights, given complete or power off\n if ims_left == 0:\n # set analysis to green\n self.analysis_led[1].off\n self.analysis_led[0].on\n else:\n # set analysis to solid red\n self.analysis_led[1].on\n \n # transmit data whether or not power switched off\n self.data_led.blink\n data = data_session.prepare_broadcast()\n broadcast_session = Broadcast(self.peer_ip)\n broadcast_session.broadcast_data(data)\n self.data_led.off\n \n except:\n # turn on error indicator and turn off all else\n # do not transmit data\n self._stop_all()\n self.error.on",
"def build_all_analysis(self, matrix_handler, trajectory_handler):\n distance_matrix = matrix_handler.distance_matrix\n\n self.all_possible_analysis = {}\n\n # Pure queries\n self.all_possible_analysis[\"Details\"] = Analysis(\"Details\", self.analysis_function_details)\n self.all_possible_analysis[\"NumClusters\"] = Analysis(\"Number of clusters\", self.analysis_function_num_clusters)\n self.all_possible_analysis[\"NumClusteredElems\"] = Analysis(\"Number of clustered elements\", self.analysis_function_total_elements)\n self.all_possible_analysis[\"MeanClusterSize\"] = Analysis(\"Mean cluster size\", self.analysis_function_mean_cluster_size)\n self.all_possible_analysis[\"PercentInTop4\"] = Analysis(\"Percent in top 4 clusters\", self.analysis_function_top_4)\n self.all_possible_analysis[\"PercentInTop\"] = Analysis(\"Percent in top cluster\", self.analysis_function_top_percent)\n self.all_possible_analysis[\"ClustersTo90\"] = Analysis(\"Clusters to 90\", self.analysis_function_num_clusters_to_percent, 90)\n self.all_possible_analysis[\"NoiseLevel\"] = Analysis(\"Noise level\", self.analysis_function_noise_level, distance_matrix.row_length)\n\n # Evaluators\n self.all_possible_analysis[\"MirrorCohesion\"] = Analysis(\"MirrorCohesion\", self.evaluate_with_calculator,\n {\"class\":MirrorCohesionCalculator,\"matrix\":distance_matrix})\n\n self.all_possible_analysis[\"Cohesion\"] = Analysis(\"Cohesion\", self.evaluate_with_calculator,\n {\"class\":CohesionCalculator,\"matrix\":distance_matrix})\n\n self.all_possible_analysis[\"Separation\"] = Analysis(\"Separation\", self.evaluate_with_calculator,\n {\"class\":SeparationCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"MinimumMeanSeparation\"] = Analysis(\"MinimumMeanSeparation\", self.evaluate_with_calculator,\n {\"class\":MeanMinimumDistanceCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Silhouette\"] = Analysis(\"Silhouette\", self.evaluate_with_calculator,\n {\"class\":SilhouetteCoefficientCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Calinski-Harabasz\"] = Analysis(\"Calinski-Harabasz\", self.evaluate_with_calculator,\n {\"class\":CalinskiHarabaszCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Dunn\"] = Analysis(\"Dunn\", self.evaluate_with_calculator,\n {\"class\":DunnCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Davies-Bouldin\"] = Analysis(\"Davies-Bouldin\", self.evaluate_with_calculator,\n {\"class\":DaviesBouldinCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"GaussianSeparation\"] = Analysis(\"GaussianSeparation\", self.evaluate_with_calculator,\n {\"class\":GaussianSeparationCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Compactness\"] = Analysis(\"Compactness\", self.evaluate_with_calculator,\n {\"class\":CompactnessCalculator,\"matrix\":distance_matrix})\n\n # Cython\n self.all_possible_analysis[\"CythonMirrorCohesion\"] = Analysis(\"CythonMirrorCohesion\", self.evaluate_with_calculator,\n {\"class\":CythonMirrorCohesionCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"CythonMinimumMeanSeparation\"] = Analysis(\"CythonMinimumMeanSeparation\", self.evaluate_with_calculator,\n {\"class\":CythonMeanMinimumDistanceCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"CythonSilhouette\"] = Analysis(\"CythonSilhouette\", self.evaluate_with_calculator,\n {\"class\":CythonSilhouetteCoefficientCalculator,\"matrix\":distance_matrix})\n\n # 
Graph\n self.all_possible_analysis[\"RatioCut\"] = Analysis(\"RatioCut\", self.evaluate_with_calculator,\n {\"class\":RatioCut,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"NCut\"] = Analysis(\"NCut\", self.evaluate_with_calculator,\n {\"class\":NCut,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"NormNCut\"] = Analysis(\"NormNCut\", self.analysis_function_norm_n_cut,distance_matrix)\n self.all_possible_analysis[\"MinMaxCut\"] = Analysis(\"MinMaxCut\", self.evaluate_with_calculator,\n {\"class\":MinMaxCut,\"matrix\":distance_matrix})\n\n # Cython & Graph\n self.all_possible_analysis[\"CythonNormNCut\"] = Analysis(\"CythonNormNCut\", self.analysis_function_cython_norm_n_cut,distance_matrix)\n\n # PCA\n self.all_possible_analysis[\"PCAanalysis\"] = Analysis(\"PCAanalysis\", self.analysis_function_pca, trajectory_handler)",
"def getTargets():\n cams = CAMS\n for cam in cams:\n\n cam.resetTargets()\n cam.resetSizes()\n image = cam.getImage()\n print(\"Time before getting enemies: \" + str(time.time()))\n bloons, sizes = Vision_Processing.GetBalloon.getEnemies(image)\n print(\"Time after getting enemies: \" + str(time.time()))\n\n # print \"getTargets\"\n # print bloons\n\n points = []\n for bloon in bloons:\n points.append(np.array([bloon[0], bloon[1]]))\n for i in range(len(bloons)):\n cam.addTarget(points[i])\n cam.addSize(sizes[i])\n\n targets = triangulate()\n print(\"Time after triangulation: \" + str(time.time()))\n print \"getTargets\"\n print targets\n # targets = cartesianToSpheric(targets, place, orientation)\n return targets",
"def post_combine(self, target):\n target_extra_files = self.target_extra_files\n if target_extra_files:\n if self.disable_cleanup:\n self.stderr.write(\"Cleanup operations disabled by user.\\n\")\n else:\n self.stderr.write(\"Found extra files not part of source tree(s): \"\n f\"{len(target_extra_files)} files.\\n\")\n\n keep_existing = create_filtered_list(\"splunk\", default=False)\n # splglob_simple: Either full paths, or simple file-only match\n keep_existing.feedall(self.keep_existing, filter=splglob_simple)\n for dest_fn in target_extra_files:\n if keep_existing.match_path(dest_fn):\n self.stderr.write(f\"Keep existing file {dest_fn}\\n\")\n elif self.disable_cleanup:\n self.stderr.write(f\"Skip cleanup of unwanted file {dest_fn}\\n\")\n else:\n self.stderr.write(f\"Remove unwanted file {dest_fn}\\n\")\n os.unlink(os.path.join(target, dest_fn))"
] | [
"0.62864506",
"0.55273044",
"0.53461725",
"0.53085685",
"0.52310586",
"0.515455",
"0.51431394",
"0.51272136",
"0.50833684",
"0.505777",
"0.4997364",
"0.49457228",
"0.49457228",
"0.4943795",
"0.49298733",
"0.48836672",
"0.48811308",
"0.4876747",
"0.4860567",
"0.48581758",
"0.48576966",
"0.48441944",
"0.48389703",
"0.4828769",
"0.48271948",
"0.48212978",
"0.4820693",
"0.48099458",
"0.480758",
"0.4806715"
] | 0.65034586 | 0 |
Validate the metadata.csv file to check basic formatting is correct | def check_metadata(metadata_file, input_validate_dict):
validated = True
    # Metadata.csv has the following columns:
# crystal_name: must not be spaces or null and should contain the RealCrystalName
# RealCrystalName: must not be spaces or null
# smiles: must not be null
# new_smiles: no specific validation
# alternate_name: no specific validation
# site_name: whole column should either be null or not null (no partial columns)
# pdb_entry: no specific validation
meta_dataframe = pd.read_csv(metadata_file)
# File level checks.
meta_sites = meta_dataframe['site_name']
if meta_sites.isnull().values.all() or meta_sites.notnull().values.all():
pass
else:
add_tset_warning(input_validate_dict, 'Metadata.csv',
'site_name column should either be completely filled or completely null', 0)
validated = False
meta_dataframe['crystal_name'] = meta_dataframe['crystal_name'].astype(str)
meta_dataframe['RealCrystalName'] = meta_dataframe['RealCrystalName'].astype(str)
meta_dataframe['smiles'] = meta_dataframe['smiles'].astype(str)
# Loop through metadata doing basic checks on each row
for idx, (_, row) in enumerate(meta_dataframe.iterrows()):
validated, input_validate_dict = check_meatadata_row(validated, input_validate_dict, row, idx)
return validated, input_validate_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_valid_csvformat(self, csv_path):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.reader(file_obj, delimiter=',') # CSV DictReader object\n self.check_valid_csv_header(reader.next())\n self.check_valid_csv_data(reader.next())",
"def test_is_valid_manifest_format_with_csv(caplog):\n assert is_valid_manifest_format(\"tests/test_manifest.csv\") == True\n assert caplog.text == \"\"",
"def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")",
"def check_meatadata_row(validated, input_validate_dict, row, idx):\n\n if row['RealCrystalName'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'RealCrystalName spaces or null', idx + 2)\n validated = False\n if row['crystal_name'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name spaces or null', idx + 2)\n validated = False\n if row['RealCrystalName'] not in row['crystal_name']:\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name does not contain RealCrystalName', idx + 2)\n validated = False\n if row['smiles'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Smiles null', idx + 2)\n validated = False\n\n return validated, input_validate_dict",
"def validate_csv(filename, header, cols, rows):\n\n # open file\n data = pd.read_csv(filename, delimiter='|')\n\n # validate header\n assert header == '|'.join(list(data.columns.values))\n\n # validate column count\n assert data.shape[1] == cols\n\n # validate row count\n assert data.shape[0] == rows\n\n # return (header_result == column_result == row_result) is True",
"def validate_csv(filename: str) -> bool:\n # From: https://stackoverflow.com/questions/2984888/check-if-file-has-a-csv-format-with-python\n try:\n with open(filename, newline='') as csvfile:\n start = csvfile.read(4096)\n\n # isprintable does not allow newlines, printable does not allow umlauts...\n if not all([c in string.printable or c.isprintable() for c in start]):\n return False\n dialect = csv.Sniffer().sniff(start)\n return True\n except csv.Error:\n # Could not get a csv dialect -> probably not a csv.\n return False\n except UnicodeError:\n return False",
"def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)",
"def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def validate_bed_format(row):\n assert len(row) >= 3, 'Bed Files must have at least 3 tab separated fields.'\n\n return True",
"def check_valid_csv_header(self, row):\n obj = re.match(re.compile('^Year\\,Month\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Headers must be `Year` `Month` Check Sample file\")",
"def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))",
"def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )",
"def csv_parser_test():\n data = csv_parser(myspreadsheet)\n print 'Your data object:'\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data) \n # Did your parser work?\n for row_num, row in enumerate(data):\n try:\n assert len(row) == 3\n except AssertionError:\n print \"Row %s seems to be misparsed; its length is %s\" % (row_num, len(row))\n # Check on one of the values:\n try:\n assert data[4][2] == 'Linguist'\n except AssertionError:\n print \"Error: data[4][2] should equal 'Linguist'; actual value is %s\" % data[4][2]\n # Did you remember your int conversions?\n try:\n assert isinstance(data[0][0], int)\n except AssertionError:\n print \"Error: data[0][0] should be an int\"\n # Did you remember your float conversions?\n try:\n assert isinstance(data[6][1], float)\n except AssertionError:\n print \"Error: data[6][1] should be a float\"",
"def __checkFile(self, filename):\n \n try:\n with open(filename, 'r') as f:\n first_line = f.readline()\n \n if not len(first_line.split(\"\\t\")) == 19:\n raise BadProteomeScoutFile(\"N/A\")\n \n \n except:\n BadProteomeScoutFile(\"Invalid ProteomeScout flat file %s.\\nFile is invalid or corrupted\" % str(filename))",
"def csv_parser_test():\r\n data = csv_parser(myspreadsheet)\r\n print('Your data object:')\r\n pp = pprint.PrettyPrinter(indent=4)\r\n pp.pprint(data) \r\n # Did your parser work?\r\n for row_num, row in enumerate(data):\r\n try:\r\n assert len(row) == 3\r\n except AssertionError:\r\n print ((\"Row %s seems to be misparsed; its length is %s\") % (row_num, len(row)))\r\n # Check on one of the values:\r\n try:\r\n assert data[4][2] == 'Linguist'\r\n except AssertionError:\r\n print ((\"Error: data[4][2] should equal 'Linguist'; actual value is %s\") % data[4][2])\r\n # Did you remember your int conversions?\r\n try:\r\n assert isinstance(data[0][0], int)\r\n except AssertionError:\r\n print (\"Error: data[0][0] should be an int\")\r\n # Did you remember your float conversions?\r\n try:\r\n assert isinstance(data[6][1], float)\r\n except AssertionError:\r\n print (\"Error: data[6][1] should be a float\")",
"def verify_metadata(df):\n # Check that mandatory column headings are present\n col_headings = df.columns.values\n requireds = ['Title', 'Authors', 'Categories', 'Item type', 'Keywords', 'Description', 'License', 'Data Sensitivity', 'RDR Project ID']\n result = all(elem in col_headings for elem in requireds)\n if not result:\n print('Error: You must supply all mandatory column headings')\n sys.exit()\n\n\n # Check that values exist for each of the mandatory fields\n for index, row in df.iterrows():\n if row['Title'] == '' or row['Title'] is None or row['Title'] is np.NaN:\n print(f\"Title is missing on row {index+1}\")\n sys.exit()\n if row['Authors'] == '' or row['Authors'] is None or row['Authors'] is np.NaN:\n print(f\"Authors is missing on row {index+1}\")\n sys.exit()\n if row['Categories'] == '' or row['Categories'] is None or row['Categories'] is np.NaN:\n print(f\"Categories is missing on row {index+1}\")\n sys.exit()\n if row['Item type'] == '' or row['Item type'] is None or row['Item type'] is np.NaN:\n print(f\"Item type is missing on row {index+1}\")\n sys.exit()\n if row['Keywords'] == '' or row['Keywords'] is None or row['Keywords'] is np.NaN:\n print(f\"Keywords is missing on row {index+1}\")\n sys.exit()\n if row['Description'] == '' or row['Description'] is None or row['Description'] is np.NAN:\n print(f\"Description is missing on row {index+1}\")\n sys.exit()\n if row['License'] == '' or row['License'] is None or row['License'] is np.NAN:\n print(f\"License is missing on row {index+1}\")\n sys.exit()\n if row['Data Sensitivity'] == '' or row['Data Sensitivity'] is None or row['Data Sensitivity'] is np.NAN:\n print(f\"Data Sensitivity is missing on row {index+1}\")\n sys.exit()\n if row['RDR Project ID'] == '' or row['RDR Project ID'] is None or row['RDR Project ID'] is np.NAN:\n print(f\"RDR Project ID is missing on row {index+1}\")\n sys.exit()",
"def test_invalid_header(self, tmpdir):\n path1 = tmpdir.join(\"invalid.csv\")\n path1.write(\"not,a,valid,header,row\")\n with pytest.raises(ValueError):\n parse_file(str(path1))\n\n path2 = tmpdir.join(\"valid.csv\")\n path2.write(\",\".join(HEADER_ROW))\n try:\n parse_file(str(path2))\n except ValueError:\n assert False, \"Unexpected ValueError\"",
"def test_validate_csv():\n duplicate_keys_file_path = os.path.join(\n TEST_DATA_DIR, \"clubs_invalid_duplicate_keys.csv\"\n )\n\n invalid_headers_file_path = os.path.join(\n TEST_DATA_DIR, \"membership_invalid_syntax.csv\"\n )\n\n # Test duplicate keys\n with open(duplicate_keys_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n assert \"error\" in validation_resp\n duplicate_keys = validation_resp[\"detail\"]\n assert \"5\" in duplicate_keys\n assert \"2\" in duplicate_keys\n\n # Test invalid syntax\n with open(invalid_headers_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n invalid_rows = [x[\"row\"] for x in validation_resp[\"detail\"]]\n assert \"error\" in validation_resp\n assert 3 in invalid_rows\n assert 4 in invalid_rows\n assert 5 in invalid_rows\n\n # Test unicode decode errors\n test_data = b\"\\xff\\xfe_\\x00k\\x00e\\x00y\\x00,\\x00n\\x00a\\x00m\\x00e\\x00\\n\"\n pytest.raises(DecodeFailed, decode_data, test_data)",
"def test_metadata(self):\n cr = CaseReader(self.filename)\n self.assertEqual(cr.format_version, format_version,\n msg='incorrect format version')\n self.assertIsNone(cr.parameters,\n msg='parameter metadata should be None')\n self.assertIsNone(cr.unknowns, msg='unknown metadata should be None')",
"def test_missing_columns(self):\n file = SimpleUploadedFile(\n \"test.csv\",\n b\"msisdn,messaging consent,edd year,edd month,baby dob year,\"\n b\"baby dob month,baby dob day\\n\",\n )\n form = MomConnectImportForm(\n data={\"source\": \"MomConnect Import\"}, files={\"file\": file}\n )\n self.assertTrue(form.is_valid())\n instance = form.save()\n self.assertEqual(instance.status, MomConnectImport.Status.ERROR)\n [error] = instance.errors.all()\n self.assertEqual(\n error.error, \"Fields edd_day facility_code id_type not found in header\"\n )",
"def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')",
"def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data",
"def test_validate_file_extension_csv(self):\n data_contacts = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n data_contacts_false = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n a = validate_file_extension_csv(data_contacts)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_csv(data_contacts_false)\n data_contacts.close()\n data_contacts_false.close()\n self.assertTrue(\"Keine gültige CSV-Datei\" or \"No valid CSV file\" in\n str(context.exception))",
"def isFormatOk(self, row):\n try:\n date = datetime.strptime(row[0], \"%m/%d/%Y\").date()\n state = fix_text(row[1])\n impressions = int(row[2])\n if impressions < 0:\n raise ValueError\n CTR = float(row[3].replace(\"%\",\"\"))\n if CTR < 0 or CTR > 1:\n raise ValueError\n except ValueError as e:\n print(f\"Wrong format of provided data {row}\", file=sys.stderr)\n return False\n return Record(date=date, state=state, impressions=impressions, CTR=CTR)",
"def check_format_of_annotation_in_file(self):\n if not self.is_span_valid():\n sys.exit()",
"def check_header(self, entry):\n if entry not in self.metadata:\n raise SyntaxError(\"Header entry must be described in the metadata lines. Entry: %s is not in metadata.\" % entry)",
"def clean(self):\n if self.csv_file:\n #TODO: add category validation here\n self.forms = self.csv_file.get_form_repr()\n self._errors = [0]*self.total_form_count()\n return\n if any(self.errors):\n return",
"def _metadata_is_consistent(metadata):\n checks = []\n required = ('version', 'fields', 'size', 'width', 'height', 'points',\n 'viewpoint', 'data')\n for f in required:\n if f not in metadata:\n print('%s required' % f)\n checks.append((lambda m: all([k in m for k in required]),\n 'missing field'))\n checks.append((lambda m: len(m['type']) == len(m['count']) ==\n len(m['fields']),\n 'length of type, count and fields must be equal'))\n checks.append((lambda m: m['height'] > 0,\n 'height must be greater than 0'))\n checks.append((lambda m: m['width'] > 0,\n 'width must be greater than 0'))\n checks.append((lambda m: m['points'] > 0,\n 'points must be greater than 0'))\n checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',\n 'binary_compressed'),\n 'unknown data type:'\n 'should be ascii/binary/binary_compressed'))\n ok = True\n for check, msg in checks:\n if not check(metadata):\n print('error:', msg)\n ok = False\n return ok",
"def __init__(self, message, file_handle, format):\n oh = open(file_handle, \"rU\")\n config.log.error(\"csv/tsv file did not pass the csv parser\")\n config.log.error(\"Message: %s\" % message)\n print(\"-----------------------\")\n print(\"CSV Diagnostic:\")\n if \"skiplines\" in format: # skip the lines.\n if format[\"skiplines\"] != -1:\n for n in range(format[\"skiplines\"]):\n oh.readline().rstrip(\"\\r\\n\")\n\n print(\"0:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"1:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"2:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"3:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"-----------------------\")\n print(\"Format Specifier: %s\" % (\" \".join([\"%s:%s\\t\" % (key, format[key]) for key in format])))\n print(\"Expected Format, based on the format specifier:\")\n oh.close()\n\n # This is a safe-ish version of loadCSV() that intelligently fails.\n\n if \"sniffer\" not in format:\n oh = open(file_handle, \"rU\")\n if \"dialect\" in format:\n reader = csv.reader(oh, dialect=format[\"dialect\"])\n else:\n reader = csv.reader(oh)\n\n try:\n if \"skiplines\" in format:\n skiplines = format[\"skiplines\"]\n else:\n skiplines = 0 # skip any header row by default.\n except:\n print(\"Error: End of File\") # premature end of file, skip out.\n print(\"-----------------------\")\n print(\"Error: %s\" % (message))\n return\n\n for index, column in enumerate(reader): # This is cryptically called column, when it is actually row.\n if index > skiplines:\n if column: # list is empty, so omit.\n if (not (column[0] in typical_headers)):\n d = {}\n for key in format:\n if not (key in ignorekeys): # ignore these tags\n try:\n if not key in d:\n d[key] = {}\n if isinstance(format[key], dict) and \"code\" in format[key]:\n # a code block insertion goes here - any valid lib and one line python code fragment\n # store it as a dict with the key \"code\"\n d[key] = eval(format[key][\"code\"]) # this always fails for some reason...\n else:\n d[key] = str(column[format[key]])\n except:\n d[key] = \"mangled\"\n print(\"%s\" % (\" \".join([\"%s:%s\" % (key, d[key]) for key in d])))\n if index > 3:\n break\n else:\n print(\" No specified format (glbase will guess)\")\n\n print(\"-----------------------\")\n config.log.error(\"End of error output\")"
] | [
"0.7094834",
"0.693815",
"0.68971455",
"0.68674195",
"0.6756951",
"0.67441386",
"0.67386234",
"0.67116493",
"0.66961503",
"0.6681226",
"0.66213435",
"0.6540441",
"0.6507407",
"0.6489792",
"0.6473728",
"0.63925356",
"0.63877565",
"0.63724947",
"0.6351833",
"0.63142306",
"0.6309316",
"0.62900144",
"0.6281137",
"0.62703973",
"0.62647605",
"0.62075955",
"0.6187965",
"0.61770767",
"0.6125492",
"0.61177087"
] | 0.7302781 | 0 |
Evaluate force and energy in Python for Periodic. | def _evaluate_periodic(snapshot, params):
box = hoomd.Box(*snapshot.configuration.box)
positions = snapshot.particles.position
A = params['A']
i = params['i']
w = params['w']
p = params['p']
a1, a2, a3 = box.to_matrix().T
V = np.dot(a1, np.cross(a2, a3))
b1 = 2 * np.pi / V * np.cross(a2, a3)
b2 = 2 * np.pi / V * np.cross(a3, a1)
b3 = 2 * np.pi / V * np.cross(a1, a2)
b = {0: b1, 1: b2, 2: b3}.get(i)
energies = A * np.tanh(
1 / (2 * np.pi * p * w) * np.cos(p * np.dot(positions, b)))
forces = A / (2 * np.pi * w) * np.sin(p * np.dot(positions, b))
forces *= 1 - (np.tanh(
np.cos(p * np.dot(positions, b)) / (2 * np.pi * p * w)))**2
forces = np.outer(forces, b)
return forces, energies | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_ttw_energy(self) -> None:\n\n self.energy = self.ecm.motive_energy_per_km(\n driving_mass=self[\"driving mass\"],\n rr_coef=self[\"rolling resistance coefficient\"],\n drag_coef=self[\"aerodynamic drag coefficient\"],\n frontal_area=self[\"frontal area\"],\n electric_motor_power=self[\"electric power\"],\n engine_power=self[\"power\"],\n recuperation_efficiency=self[\"recuperation efficiency\"],\n aux_power=self[\"auxiliary power demand\"],\n battery_charge_eff=self[\"battery charge efficiency\"],\n battery_discharge_eff=self[\"battery discharge efficiency\"],\n fuel_cell_system_efficiency=self[\"fuel cell system efficiency\"],\n )\n\n self.energy = self.energy.assign_coords(\n {\n \"powertrain\": self.array.powertrain,\n \"year\": self.array.year,\n \"size\": self.array.coords[\"size\"],\n \"value\": self.array.coords[\"value\"],\n }\n )\n\n if self.energy_consumption:\n self.override_ttw_energy()\n\n distance = self.energy.sel(parameter=\"velocity\").sum(dim=\"second\") / 1000\n\n self[\"engine efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"engine efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n _o = lambda x: np.where((x == 0) | (x == np.nan), 1, x)\n\n if self.engine_efficiency is not None:\n print(\"Engine efficiency is being overridden.\")\n for key, val in self.engine_efficiency.items():\n pwt, size, year = key\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"transmission efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"transmission efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n if self.transmission_efficiency is not None:\n print(\"Transmission efficiency is being overridden.\")\n for key, val in self.transmission_efficiency.items():\n pwt, size, year = key\n\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] 
= self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"TtW energy\"] = (\n self.energy.sel(\n parameter=[\"motive energy\", \"auxiliary energy\", \"recuperated energy\"]\n ).sum(dim=[\"second\", \"parameter\"])\n / distance\n ).T\n\n self[\"TtW energy, combustion mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] > 0\n )\n self[\"TtW energy, electric mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] == 0\n )\n\n self[\"auxiliary energy\"] = (\n self.energy.sel(parameter=\"auxiliary energy\").sum(dim=\"second\") / distance\n ).T",
"def test_periodic():\n\n from global_variables import DATA_DIR\n\n # Initialise the lightcurve\n lcf = pd.read_pickle(\n '{}/trappist/k2gp200164267-c12-lcf-full.pickle'.format(DATA_DIR))\n lcf['x'] = lcf.x_pos\n lcf['y'] = lcf.y_pos\n lcf = lc_preparation.initialise_lcf(lcf, f_col='f_tpf_pos')\n #lcf = lc_preparation.clean_lcf(lcf)\n # lcf = lc_preparation.remove_outliers_initial(lcf, 0.03)\n\n # if flag_transits:\n # _, lcf, _ = lc_preparation.flag_transits(lcf)\n\n output = lc_preparation.find_periodicity_peak(\n lcf.t, lcf.f_temporal + lcf.f_detrended, plot=True)\n\n print(output)\n\n return detrend_lcf_quasiperiodic(\n lcf, period=3, proc_kw='ideal', verbose=True, plot_all=True,\n evolve=False, n_samples=1400)",
"def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy",
"def _evaluate_electric(snapshot, params):\n positions = snapshot.particles.position\n charges = snapshot.particles.charge\n E_field = params\n energies = -charges * np.dot(positions, E_field)\n forces = np.outer(charges, E_field)\n return forces, energies",
"def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e",
"def ComputeEnergyConsumption(self):\r\n pass",
"def evaluate(self, time) -> float:\n ...",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def eval_energy(at, do_KE=True, do_PE=True):\n energy = eval_energy_PV(at) + eval_energy_mu(at)\n\n if do_PE:\n energy += eval_energy_PE(at) \n\n if do_KE:\n energy += eval_energy_KE(at)\n\n return energy",
"def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))",
"def compute_energy(self):\n\n # radiation energy\n Qsqrd = self.omega_coords[:,:,1]*self.omega_coords[:,:,1]\n Psqrd = self.omega_coords[:,:,0]*self.omega_coords[:,:,0]\n\n e_rad = (Psqrd/self.mode_mass + (self.mode_mass*self.omega**2)*Qsqrd)*.5\n\n # space charge energy\n Dsqrd = self.dc_coords[:,:,0]*self.dc_coords[:,:,0]\n\n e_drft = Dsqrd/(2.*self.mode_mass)\n\n energy = e_rad+e_drft\n\n return energy",
"def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)",
"def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy",
"def evolve_system(self,dt, energy_file = None):\n phi = self.compute_field()\n force_m = self.compute_forces_mesh()\n self.acc_new = np.zeros([len(self),2])\n #Computes the force felt by each particles and deduce the acceleration\n for i in range(len(self)):\n x,y = self.ptclgrid.ixy[i]\n x = int(x)\n y = int(y)\n self.acc_new[i][0] += (1/self.mass[i]*force_m[0][x,y])\n self.acc_new[i][1] += (1/self.mass[i]*force_m[1][x,y])\n #Evolve the position and momenta of the particle in the list\n self.particles.evolve(self.acc,self.acc_new,dt,self.size, boundary_periodic=self.boundary_periodic)\n #For non-periodic condition, deletes the particles that leave the grid from the list\n if self.boundary_periodic!=True: \n index = np.argwhere((self.particles.position>self.size-1))\n index2 = np.argwhere((self.particles.position<0))\n index = {a for a in np.append(index,index2)}\n index = list(index)\n self.particles.momentum = np.delete(self.particles.momentum,index,axis=0)\n self.acc = np.delete(self.acc,index,axis=0)\n self.acc_new = np.delete(self.acc_new,index,axis=0)\n self.mass = np.delete(self.mass,index,axis=0)\n self.particles.position = np.delete(self.particles.position,index,axis=0)\n self.acc = self.acc_new.copy()\n #Update the position of the particles on the grid\n self.ptclgrid.update_position(self.particles.position,self.mass)\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n #Write the energy in a file if on is given\n if energy_file != None:\n energy_file.write(f'{self.energy()}\\n')\n energy_file.flush()\n return self.grid_pos",
"def eval_calc(self):\n\n # Define the GmfGetter\n\n #for args_tag in range(len(self.args)-1):\n # Looping over all source models (Note: the last attribute in self.args is a monitor - so skipping it)\n \n from openquake.calculators import getters\n from openquake.baselib import general\n from openquake.hazardlib import const, calc, gsim\n from openquake.commands import dbserver as cdbs\n if self.vtag >= 12:\n from openquake.hazardlib.const import StdDev\n if self.vtag >= 12:\n from openquake.commonlib import datastore\n else:\n from openquake.baselib import datastore\n\n cur_getter = getters.GmfGetter(self.args[0][0], calc.filters.SourceFilter(\n self.dstore['sitecol'], self.dstore['oqparam'].maximum_distance), \n self.calculator.param['oqparam'], self.calculator.param['amplifier'], \n self.calculator.param['sec_perils'])\n\n # Evaluate each computer\n print('FetchOpenQuake: Evaluting ground motion models.')\n for computer in cur_getter.gen_computers(self.mon):\n # Looping over rupture(s) in the current realization\n sids = computer.sids\n #print('eval_calc: site ID sids = ')\n #print(sids)\n eids_by_rlz = computer.ebrupture.get_eids_by_rlz(\n cur_getter.rlzs_by_gsim)\n mag = computer.ebrupture.rupture.mag\n im_list = []\n data = general.AccumDict(accum=[])\n cur_T = self.event_info['IntensityMeasure'].get('Periods', None)\n for cur_gs, rlzs in cur_getter.rlzs_by_gsim.items():\n # Looping over GMPE(s)\n #print('eval_calc: cur_gs = ')\n #print(cur_gs)\n num_events = sum(len(eids_by_rlz[rlz]) for rlz in rlzs)\n if num_events == 0: # it may happen\n continue\n # NB: the trick for performance is to keep the call to\n # .compute outside of the loop over the realizations;\n # it is better to have few calls producing big arrays\n tmpMean = []\n tmpstdtot = []\n tmpstdinter = []\n tmpstdintra = []\n if self.vtag >= 12:\n mean_stds_all = computer.cmaker.get_mean_stds([computer.ctx], StdDev.EVENT)[0]\n for imti, imt in enumerate(computer.imts): \n # Looping over IM(s)\n #print('eval_calc: imt = ', imt)\n if str(imt) in ['PGA', 'PGV', 'PGD']:\n cur_T = [0.0]\n im_list.append(str(imt))\n imTag = 'ln' + str(imt)\n else:\n if 'SA' not in im_list:\n im_list.append('SA')\n imTag = 'lnSA'\n if isinstance(cur_gs, gsim.multi.MultiGMPE):\n gs = cur_gs[str(imt)] # MultiGMPE\n else:\n gs = cur_gs # regular GMPE\n try:\n if self.vtag >= 12:\n mean_stds = mean_stds_all[:, imti]\n num_sids = len(computer.sids)\n num_stds = len(mean_stds)\n if num_stds == 1:\n # no standard deviation is available\n # for truncation_level = 0 there is only mean, no stds\n if computer.correlation_model:\n raise ValueError('truncation_level=0 requires '\n 'no correlation model')\n mean = mean_stds[0]\n stddev_intra = 0\n stddev_inter = 0\n stddev_total = 0\n if imti == 0:\n tmpMean = mean\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = stddev_total\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=0)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot, stddev_total), axis=0)\n elif num_stds == 2:\n # If the GSIM provides only total standard deviation, we need\n # to compute mean and total standard deviation at the sites\n # of interest.\n # In this case, we also assume no correlation model is used.\n # By default, we evaluate stddev_inter as the stddev_total\n\n if self.correlation_model:\n raise 
CorrelationButNoInterIntraStdDevs(\n self.correlation_model, gsim)\n\n mean, stddev_total = mean_stds\n stddev_total = stddev_total.reshape(stddev_total.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n stddev_inter = stddev_total\n stddev_intra = 0\n if imti == 0:\n tmpMean = mean\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = stddev_total\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=0)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot, stddev_total), axis=0)\n else:\n mean, stddev_inter, stddev_intra = mean_stds\n stddev_intra = stddev_intra.reshape(stddev_intra.shape + (1, ))\n stddev_inter = stddev_inter.reshape(stddev_inter.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n if imti == 0:\n tmpMean = mean\n tmpstdinter = stddev_inter\n tmpstdintra = stddev_intra\n tmpstdtot = np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=1)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot,np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)), axis=1)\n\n elif self.vtag == 11:\n # v11\n dctx = computer.dctx.roundup(\n cur_gs.minimum_distance)\n if computer.distribution is None:\n if computer.correlation_model:\n raise ValueError('truncation_level=0 requires '\n 'no correlation model')\n mean, _stddevs = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt, stddev_types=[])\n num_sids = len(computer.sids)\n if cur_gs.DEFINED_FOR_STANDARD_DEVIATION_TYPES == {const.StdDev.TOTAL}:\n # If the GSIM provides only total standard deviation, we need\n # to compute mean and total standard deviation at the sites\n # of interest.\n # In this case, we also assume no correlation model is used.\n if computer.correlation_model:\n raise CorrelationButNoInterIntraStdDevs(\n computer.correlation_model, cur_gs)\n\n mean, [stddev_total] = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt, [const.StdDev.TOTAL])\n stddev_total = stddev_total.reshape(\n stddev_total.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n if imti == 0:\n tmpMean = mean\n tmpstdtot = stddev_total\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=0)\n tmpstdtot = np.concatenate((tmpstdtot, stddev_total), axis=0)\n else:\n mean, [stddev_inter, stddev_intra] = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt,\n [const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT])\n stddev_intra = stddev_intra.reshape(\n stddev_intra.shape + (1, ))\n stddev_inter = stddev_inter.reshape(\n stddev_inter.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n\n if imti == 0:\n tmpMean = mean\n tmpstdinter = stddev_inter\n tmpstdintra = stddev_intra\n tmpstdtot = np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=1)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot,np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)), axis=1)\n\n else:\n # v10\n dctx = computer.dctx.roundup(\n cur_gs.minimum_distance)\n if 
computer.truncation_level == 0:\n if computer.correlation_model:\n raise ValueError('truncation_level=0 requires '\n 'no correlation model')\n mean, _stddevs = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt, stddev_types=[])\n num_sids = len(computer.sids)\n if cur_gs.DEFINED_FOR_STANDARD_DEVIATION_TYPES == {const.StdDev.TOTAL}:\n # If the GSIM provides only total standard deviation, we need\n # to compute mean and total standard deviation at the sites\n # of interest.\n # In this case, we also assume no correlation model is used.\n if computer.correlation_model:\n raise CorrelationButNoInterIntraStdDevs(\n computer.correlation_model, cur_gs)\n\n mean, [stddev_total] = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt, [const.StdDev.TOTAL])\n stddev_total = stddev_total.reshape(\n stddev_total.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n if imti == 0:\n tmpMean = mean\n tmpstdtot = stddev_total\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=0)\n tmpstdtot = np.concatenate((tmpstdtot, stddev_total), axis=0)\n else:\n mean, [stddev_inter, stddev_intra] = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt,\n [const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT])\n stddev_intra = stddev_intra.reshape(\n stddev_intra.shape + (1, ))\n stddev_inter = stddev_inter.reshape(\n stddev_inter.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n\n if imti == 0:\n tmpMean = mean\n tmpstdinter = stddev_inter\n tmpstdintra = stddev_intra\n tmpstdtot = np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=1)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot,np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)), axis=1)\n\n except Exception as exc:\n raise RuntimeError(\n '(%s, %s, source_id=%r) %s: %s' %\n (gs, imt, computer.source_id.decode('utf8'),\n exc.__class__.__name__, exc)\n ).with_traceback(exc.__traceback__)\n\n # initialize\n # NOTE: needs to be extended for gmpe logic tree\n gm_collector = []\n # collect data\n for k in range(tmpMean.shape[0]):\n imResult = {}\n if len(tmpMean):\n imResult.update({'Mean': [float(x) for x in tmpMean[k].tolist()]})\n if len(tmpstdtot):\n imResult.update({'TotalStdDev': [float(x) for x in tmpstdtot[k].tolist()]})\n if len(tmpstdinter):\n imResult.update({'InterEvStdDev': [float(x) for x in tmpstdinter[k].tolist()]})\n if len(tmpstdintra):\n imResult.update({'IntraEvStdDev': [float(x) for x in tmpstdintra[k].tolist()]})\n gm_collector.append({imTag: imResult})\n #print(gm_collector)\n \n # close datastore instance\n self.calculator.datastore.close()\n \n # stop dbserver\n if self.vtag >= 11:\n cdbs.main('stop')\n else:\n cdbs.dbserver('stop')\n \n # terminate the subprocess\n if self.prc:\n self.prc.kill()\n\n # copy calc hdf file\n if self.vtag >= 11:\n calc_id = datastore.get_last_calc_id()\n path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id)\n else:\n path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % self.calc_id)\n\n if self.dir_info:\n dir_output = self.dir_info['Output']\n try:\n shutil.copy2(path, dir_output)\n print('FetchOpenQuake: calc hdf file saved.')\n except:\n print('FetchOpenQuake: failed to copy calc hdf file.')\n\n # Final results\n res = {'Magnitude': mag,\n 'Periods': cur_T,\n 'IM': im_list,\n 'GroundMotions': 
gm_collector}\n \n # return\n return res",
"def _compute_force(mass, evo_config):\n\n max_force = evo_config['individuals']['max_force']\n min_force = evo_config['individuals']['min_force']\n max_force = max_force - min_force\n return 1 / (1 / max_force + np.exp(-mass * 3)) + min_force",
"def energy_calculation(theta_0, omega_0, dt):\n samples = int(T/dt) # Finds samplerate for chosen dt\n \n # Creat array of values using Euler-Cromer approx\n thetaArr, omegaArr, timeArr = euler_cromer_approx(theta_0,omega_0,dt,T_i)\n \n # Function for total energy\n energy_func = lambda m,l,omega,theta: (1/2)*m*(l**2)*(omega**2) + (1/2)*m*g*l*(theta**2)\n \n # Time array in same dimension \n t = np.linspace(T_i,T,samples)\n energy = np.zeros(samples)\n \n for i in range(len(t)):\n \"\"\"\n Calculation of total energy for every time-element\n \"\"\"\n energy[i] = energy_func(m,l,omegaArr[i],thetaArr[i])\n \n \n E_total = energy\n\n return t, E_total",
"def calc_variables ( ):\n\n # In this example we simulate using the shifted-force potential only\n # The values of < p_sf >, < e_sf > and density should be consistent (for this potential)\n # There are no long-range or delta corrections\n\n from averages_module import VariableType\n \n # Preliminary calculations\n vol = box**3 # Volume\n rho = n / vol # Density\n\n # Variables of interest, of class VariableType, containing three attributes:\n # .val: the instantaneous value\n # .nam: used for headings\n # .method: indicating averaging method\n # If not set below, .method adopts its default value of avg\n # The .nam and some other attributes need only be defined once, at the start of the program,\n # but for clarity and readability we assign all the values together below\n\n # Move acceptance ratio\n m_r = VariableType ( nam = 'Move ratio', val = m_ratio, instant = False )\n\n # Internal energy per molecule (shifted-force potential)\n # Ideal gas contribution (assuming nonlinear molecules) plus total PE divided by N\n e_sf = VariableType ( nam = 'E/N shifted force', val = 3.0*temperature + total.pot/n )\n\n # Pressure (shifted-force potential)\n # Ideal gas contribution plus total virial divided by V\n p_sf = VariableType ( nam = 'P shifted force', val = rho*temperature + total.vir/vol )\n\n # Collect together into a list for averaging\n return [ m_r, e_sf, p_sf ]",
"def energies():\n # Hardcoded initial values\n numsteps = 10000\n time_max = 1\n # Running the calculation in the solver class using the velocity verlet method\n # for better accuracy.\n verlet = solver(input_matrix, 'verlet', time_max, numsteps)\n output_matrix, KE, PE, AM = verlet.main()\n # Creating a simple time axis for plotting\n x = np.linspace(0, 1, numsteps+1)\n\n # Plotting kinetic energy over time\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, KE)\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])\n\n # Plotting potential energy over time\n plt.figure(2, figsize=(10, 10))\n plt.plot(x, PE)\n plt.suptitle('Total potential energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE'])\n\n # Plotting total energy against time\n plt.figure(3, figsize=(10, 10))\n plt.plot(x, PE+KE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE+PE'])\n\n # Plotting angular momentum against time. print the amplitude to terminal\n amplitude = max(AM)-min(AM)\n print('Amplitude of angular momentum during 1 year: %g[AU²/yr²]' %(amplitude))\n plt.figure(4, figsize=(10, 10))\n plt.plot(x, AM)\n plt.suptitle('Total angular momentum in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²/yr²]', fontsize=16)\n plt.legend(['AM'])\n\n # Plotting the kinetic, potential and total energy against time to see\n # how great the variations are\n plt.figure(5, figsize=(10, 10))\n plt.plot(x, PE, x, KE, x, KE+PE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE', 'KE', 'KE+PE'])\n plt.show()",
"def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)",
"def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)",
"def configuration_energies(self, minimize=False, max_confs=None):\n # Determine the name of the file\n prefix = 'xtal' if self.args.FNs['score']=='default' else \\\n os.path.basename(self.args.FNs['score']).split('.')[0]\n if minimize:\n prefix = 'min_' + prefix\n energyFN = os.path.join(self.args.dir['CD'], prefix + '.pkl.gz')\n\n # Set the force field to fully interacting\n params_full = self.system.paramsFromAlpha(1.0, 'CD')\n self.system.setParams(params_full)\n\n # Load the configurations\n if os.path.isfile(energyFN):\n (confs, Es) = load_pkl_gz(energyFN)\n else:\n (confs, Es) = self._get_confs_to_rescore(site=False, \\\n minimize=minimize, sort=False)\n\n self.log.set_lock('CD')\n self.log.tee(\"\\n>>> Calculating energies for %d configurations, \"%len(confs) + \\\n \"starting at \" + \\\n time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime()) + \"\\n\")\n self.log.recordStart('configuration_energies')\n\n updated = False\n # Calculate MM and OBC energies\n if not 'MM' in Es.keys():\n Es = self.system.energyTerms(confs, Es)\n solvation_o = self.args.params['CD']['solvation']\n self.args.params['CD']['solvation'] = 'Full'\n if self.system.isForce('OBC'):\n del self._forceFields['OBC']\n self.system.clear_evaluators()\n self.system.setParams(params_full)\n Es = self.system.energyTerms(confs, Es)\n self.args.params['CD']['solvation'] = solvation_o\n updated = True\n\n # Direct electrostatic energy\n FN = os.path.join(os.path.dirname(self.args.FNs['grids']['ELE']),\n 'direct_ele.nc')\n if not 'direct_ELE' in Es.keys() and os.path.isfile(FN):\n key = 'direct_ELE'\n Es[key] = np.zeros(len(confs))\n from AlGDock.ForceFields.Grid.Interpolation import InterpolationForceField\n FF = InterpolationForceField(FN, \\\n scaling_property='scaling_factor_electrostatic')\n self.top.universe.setForceField(FF)\n for c in range(len(confs)):\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[c]))\n Es[key][c] = self.top.universe.energy()\n updated = True\n\n # Calculate symmetry-corrected RMSD\n if not 'rmsd' in Es.keys() and (self.args.params['CD']['rmsd'] is\n not False):\n Es['rmsd'] = self.get_rmsds(confs)\n updated = True\n\n if updated:\n self.log.tee(\"\\nElapsed time for ligand MM, OBC, and grid energies: \" + \\\n HMStime(self.log.timeSince('configuration_energies')), \\\n process='CD')\n self.log.clear_lock('CD')\n\n # Reduce the number of conformations\n if max_confs is not None:\n confs = confs[:max_confs]\n\n # Implicit solvent energies\n self.data['CD'].confs['starting_poses'] = None\n from AlGDock.postprocessing import Postprocessing\n pp_complete = Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run([('original', 0, 0, 'R')])\n\n for phase in self.args.params['CD']['phases']:\n if not 'R' + phase in Es.keys():\n Es['R' + phase] = self.args.params['CD']['receptor_' + phase]\n\n toClear = []\n for phase in self.args.params['CD']['phases']:\n for moiety in ['L', 'RL']:\n if not moiety + phase in Es.keys():\n outputname = os.path.join(self.args.dir['CD'],\n '%s.%s%s' % (prefix, moiety, phase))\n if phase.startswith('NAMD'):\n traj_FN = os.path.join(self.args.dir['CD'],\n '%s.%s.dcd' % (prefix, moiety))\n self._write_traj(traj_FN, confs, moiety)\n elif phase.startswith('sander'):\n traj_FN = os.path.join(self.args.dir['CD'],\n '%s.%s.mdcrd' % (prefix, moiety))\n self._write_traj(traj_FN, confs, moiety)\n elif phase.startswith('gbnsr6'):\n traj_FN = os.path.join(self.args.dir['CD'], \\\n 
'%s.%s%s'%(prefix,moiety,phase),'in.crd')\n elif phase.startswith('OpenMM'):\n traj_FN = None\n elif phase in ['APBS_PBSA']:\n traj_FN = os.path.join(self.args.dir['CD'],\n '%s.%s.pqr' % (prefix, moiety))\n else:\n raise Exception('Unknown phase!')\n if not traj_FN in toClear:\n toClear.append(traj_FN)\n for program in ['NAMD', 'sander', 'gbnsr6', 'OpenMM', 'APBS']:\n if phase.startswith(program):\n # TODO: Mechanism to do partial calculation\n Es[moiety+phase] = getattr(self,'_%s_Energy'%program)(confs, \\\n moiety, phase, traj_FN, outputname, debug=DEBUG)\n updated = True\n # Get any data added since the calculation started\n if os.path.isfile(energyFN):\n (confs_o, Es_o) = load_pkl_gz(energyFN)\n for key in Es_o.keys():\n if key not in Es.keys():\n Es[key] = Es_o[key]\n # Store the data\n self.log.tee(write_pkl_gz(energyFN, (confs, Es)))\n break\n for FN in toClear:\n if (FN is not None) and os.path.isfile(FN):\n os.remove(FN)\n\n for key in Es.keys():\n Es[key] = np.array(Es[key])\n self._combine_MM_and_solvent(Es)\n\n if updated:\n self.log.set_lock('CD')\n self.log.tee(\"\\nElapsed time for energies: \" + \\\n HMStime(self.log.timeSince('configuration_energies')), \\\n process='CD')\n self.log.clear_lock('CD')\n\n # Get any data added since the calculation started\n if os.path.isfile(energyFN):\n (confs_o, Es_o) = load_pkl_gz(energyFN)\n for key in Es_o.keys():\n if key not in Es.keys():\n Es[key] = Es_o[key]\n\n # Store the data\n self.log.tee(write_pkl_gz(energyFN, (confs, Es)))\n return (confs, Es)",
"def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._copy_ne_()\n [self._compute_(case) for case in [\"bgc\", \"flare\"]]\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec",
"def test_energy_cost(self):\n rs = self.rate.get_rate_schedule(self.eir.api)\n\n i = pd.date_range(start = '2019-05-01', end='2019-06-30', freq='5min')\n s = pd.Series(data=0, index = i, dtype = np.float32)\n\n total = 10.0 * .1338\n total += 10.0 * .0969\n total += 10.0 * .1611\n total += 20.3 * 2\n s[pd.Timestamp('2019-05-01T18:00:00')] = 10.0\n s[pd.Timestamp('2019-05-01T06:00:00')] = 10.0\n s[pd.Timestamp('2019-06-05T15:00:00')] = 10.0\n\n df = rs.get_costs(s)\n\n print(df.head())",
"def force(self, osc):\n pass",
"def Aperiodic(self):\n return self._with_axiom('Aperiodic')",
"def __call__(self, x):\n\n self.dbeads.q = x\n e = self.dforces.pot # Energy\n g = -self.dforces.f # Gradient\n\n return e, g",
"def ha(env, cstate=0):\n T1 = 10\n T2 = 10\n thM = 20\n thm = 5\n vr = 10.5\n v1 = -1.3\n v2 = -2.7\n assert(T1 == T2)\n\n delta = None # None to cause failure\n # The continous variables used in this ha\n x = T1 # clock1 variable\n y = T2 # clock2 variable\n th = 11.5 # The reactor temperature\n\n # You need vtol here, because of floating point error.\n loc0_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(vr),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc0_FT = False\n\n loc1_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v1),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc1_FT = False\n\n loc2_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v2),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc2_FT = False\n\n # Location 3 is reactor shutdown\n loc3_FT = False\n\n # Location 0\n def location0(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thM and x >= T1:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 1, 0, x, y, th, None, True, None, None, curr_time\n elif th == thM and y >= T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 2, 0, x, y, th, None, None, True, None, curr_time\n elif th == thM and x < T1 and y < T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 3, 0, x, y, th, None, None, None, True, curr_time\n # The invariant\n elif th <= thM:\n if not loc0_FT:\n x = loc0_ode_x.compute(vals, curr_time-prev_time)\n y = loc0_ode_y.compute(vals, curr_time-prev_time)\n th = loc0_ode_th.compute(vals, curr_time-prev_time)\n loc0_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thM) > loc0_ode_th.vtol:\n deltath = loc0_ode_th.delta(vals, quanta=(thM-th))\n else:\n th = thM\n deltath = 0\n return 0, deltath, x, y, th, False, None, None, None, curr_time\n else:\n # print('th:', th)\n raise RuntimeError('Reached unreachable branch'\n ' in location 0')\n\n def location1(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n x = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc1_FT:\n x = loc1_ode_x.compute(vals, curr_time-prev_time)\n y = loc1_ode_y.compute(vals, curr_time-prev_time)\n th = loc1_ode_th.compute(vals, curr_time-prev_time)\n loc1_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc1_ode_th.vtol:\n deltath = loc1_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 1, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable 
branch'\n ' in location 1')\n\n def location2(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n y = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc2_FT:\n x = loc2_ode_x.compute(vals, curr_time-prev_time)\n y = loc2_ode_y.compute(vals, curr_time-prev_time)\n th = loc2_ode_th.compute(vals, curr_time-prev_time)\n loc2_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc2_ode_th.vtol:\n deltath = loc2_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 2, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 2')\n\n def location3(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n global step\n # print('total steps: ', step)\n # Done\n print(time.time()-start)\n sys.exit(1)\n\n # The dictionary for the switch statement.\n switch_case = {\n 0: location0,\n 1: location1,\n 2: location2,\n 3: location3\n }\n\n prev_time = env.now\n while(True):\n (cstate, delta, x, y, th,\n loc0_FT, loc1_FT, loc2_FT, loc3_FT,\n prev_time) = switch_case[cstate](x, y, th,\n loc0_FT,\n loc1_FT,\n loc2_FT,\n loc3_FT,\n prev_time)\n # This should always be the final statement in this function\n global step\n step += 1\n yield env.timeout(delta)",
"def get_results(self):\n mae_thetas = []\n mae_phis = []\n num_ang = 0\n q_vectors = []\n\n try:\n calculation = self.ctx.f_t\n if not calculation.is_finished_ok:\n message = f'ERROR: Force theorem Fleur calculation failed somehow it has exit status {calculation.exit_status}'\n self.control_end_wc(message)\n return self.exit_codes.ERROR_FORCE_THEOREM_FAILED\n except AttributeError:\n message = 'ERROR: Something went wrong I do not have a force theorem Fleur calculation'\n self.control_end_wc(message)\n return self.exit_codes.ERROR_FORCE_THEOREM_FAILED\n\n try:\n out_dict = calculation.outputs.output_parameters.dict\n h_so = out_dict.dmi_force_so_h_so\n mae_thetas = out_dict.dmi_force_theta\n mae_phis = out_dict.dmi_force_phi\n num_ang = out_dict.dmi_force_angles\n q_vectors = [self.ctx.wf_dict['q_vectors'][x - 1] for x in out_dict.dmi_force_so_q]\n e_u = out_dict.dmi_force_units\n except AttributeError:\n message = ('Did not manage to read evSum or energy units after FT calculation.')\n self.control_end_wc(message)\n return self.exit_codes.ERROR_FORCE_THEOREM_FAILED\n\n if not isinstance(h_so, list):\n message = ('Did not manage to read evSum or energy units after FT calculation.')\n self.control_end_wc(message)\n return self.exit_codes.ERROR_FORCE_THEOREM_FAILED\n\n if e_u in ['htr', 'Htr']:\n h_so = np.array(h_so) * HTR_TO_EV\n h_so = h_so.tolist()\n\n self.ctx.h_so = h_so\n self.ctx.q_vectors = q_vectors\n self.ctx.mae_thetas = mae_thetas\n self.ctx.mae_phis = mae_phis\n self.ctx.num_ang = num_ang"
] | [
"0.6137717",
"0.6044311",
"0.59760916",
"0.5962495",
"0.5909589",
"0.582551",
"0.57956237",
"0.5791442",
"0.57832485",
"0.5759966",
"0.5754337",
"0.5751843",
"0.57382846",
"0.57323146",
"0.57047796",
"0.56934625",
"0.56874776",
"0.5665626",
"0.5659576",
"0.56581616",
"0.561197",
"0.5606913",
"0.55802715",
"0.55553085",
"0.55470574",
"0.55450064",
"0.5532125",
"0.55228734",
"0.55217004",
"0.5494804"
] | 0.6097572 | 1 |
Evaluate force and energy in Python for ElectricField. | def _evaluate_electric(snapshot, params):
    positions = snapshot.particles.position
    charges = snapshot.particles.charge
    E_field = params
    # Potential energy of each charge in the uniform field: U_i = -q_i * (r_i . E)
    energies = -charges * np.dot(positions, E_field)
    # Constant force on each charge: F_i = q_i * E
    forces = np.outer(charges, E_field)
    return forces, energies | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_electric_field(self):\n self.set_grid()\n rho = self.grid.distribute(self.bunch.positions)\n rho *= self.bunch.line_charge_density * 4 # unknown origin\n phi = self.solver.get_potential(rho, self.bunch.line_charge_density)\n Ex, Ey = self.grid.gradient(-phi)\n self.fields[:, 0] = self.grid.interpolate(Ex, self.bunch.positions)\n self.fields[:, 1] = self.grid.interpolate(Ey, self.bunch.positions)",
"def compute_energy(self):\n\n # radiation energy\n Qsqrd = self.omega_coords[:,:,1]*self.omega_coords[:,:,1]\n Psqrd = self.omega_coords[:,:,0]*self.omega_coords[:,:,0]\n\n e_rad = (Psqrd/self.mode_mass + (self.mode_mass*self.omega**2)*Qsqrd)*.5\n\n # space charge energy\n Dsqrd = self.dc_coords[:,:,0]*self.dc_coords[:,:,0]\n\n e_drft = Dsqrd/(2.*self.mode_mass)\n\n energy = e_rad+e_drft\n\n return energy",
"def evaluate_Q_e(self, q):\n # positions of spring endpoints in GCS\n # distance vector\n r_ij_P = self.evaluate_rijP(q)\n\n # length of vector - spring length\n self.l = np.linalg.norm(r_ij_P, ord=2)\n\n I_r_ij = self._evaluate_I_r_ij(r_ij_P, self.l)\n\n # velocity of deformation of spring length\n dq_ = self.body_i.evaluate_dr(q, element_id=self.element_id, ksi=self.element_ksi)\n dl = np.dot(I_r_ij, dq_)\n\n # force value (amplitude) of spring element\n self.F_s = self._evaluate_F(self.l, self.dl)\n if self.direction == \"compression\":\n if self.F_s > 0.:\n self.F_s = 0.\n\n if self.direction == \"tension\":\n if self.F_s < 0.:\n self.F_s = 0.\n\n F = -self.F_s * I_r_ij\n\n # force on flexible body\n S = self._element._evaluate_S(self.element_ksi)\n Q_e_i_element = np.dot(S.T, F)\n\n Q_e_i = reduce(np.dot, [self._element.B.T, self._element.T.T, Q_e_i_element])\n\n # force on ground body\n Q_e_j = None\n\n return Q_e_i, Q_e_j",
"def calculate_ttw_energy(self) -> None:\n\n self.energy = self.ecm.motive_energy_per_km(\n driving_mass=self[\"driving mass\"],\n rr_coef=self[\"rolling resistance coefficient\"],\n drag_coef=self[\"aerodynamic drag coefficient\"],\n frontal_area=self[\"frontal area\"],\n electric_motor_power=self[\"electric power\"],\n engine_power=self[\"power\"],\n recuperation_efficiency=self[\"recuperation efficiency\"],\n aux_power=self[\"auxiliary power demand\"],\n battery_charge_eff=self[\"battery charge efficiency\"],\n battery_discharge_eff=self[\"battery discharge efficiency\"],\n fuel_cell_system_efficiency=self[\"fuel cell system efficiency\"],\n )\n\n self.energy = self.energy.assign_coords(\n {\n \"powertrain\": self.array.powertrain,\n \"year\": self.array.year,\n \"size\": self.array.coords[\"size\"],\n \"value\": self.array.coords[\"value\"],\n }\n )\n\n if self.energy_consumption:\n self.override_ttw_energy()\n\n distance = self.energy.sel(parameter=\"velocity\").sum(dim=\"second\") / 1000\n\n self[\"engine efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"engine efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n _o = lambda x: np.where((x == 0) | (x == np.nan), 1, x)\n\n if self.engine_efficiency is not None:\n print(\"Engine efficiency is being overridden.\")\n for key, val in self.engine_efficiency.items():\n pwt, size, year = key\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"transmission efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"transmission efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n if self.transmission_efficiency is not None:\n print(\"Transmission efficiency is being overridden.\")\n for key, val in self.transmission_efficiency.items():\n pwt, size, year = key\n\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] 
= self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"TtW energy\"] = (\n self.energy.sel(\n parameter=[\"motive energy\", \"auxiliary energy\", \"recuperated energy\"]\n ).sum(dim=[\"second\", \"parameter\"])\n / distance\n ).T\n\n self[\"TtW energy, combustion mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] > 0\n )\n self[\"TtW energy, electric mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] == 0\n )\n\n self[\"auxiliary energy\"] = (\n self.energy.sel(parameter=\"auxiliary energy\").sum(dim=\"second\") / distance\n ).T",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))",
"def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))",
"def __call__(self, x):\n\n self.dbeads.q = x\n e = self.dforces.pot # Energy\n g = -self.dforces.f # Gradient\n\n return e, g",
"def make_energy(self):\n def energy_func(m):\n heff = self.field(m)\n return -energy.zeeman(m, self.Ms, heff) \\\n + energy.shape_anisotropy(m, self.Ms,\n self.Nd[0], self.Nd[1], self.Nd[2])\n self.energy = energy_func",
"def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec",
"def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))",
"def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e",
"def _calc_energy( self, V_a, eos_d ):\n pass",
"def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)",
"def F(self):\n return self.generic_getter(get_F_potential, \"F\", \"convert_energy\")",
"def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)",
"def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy",
"def _calculate_fuel(self):\n self._fuel = self._calculate_fuel_r(self._mass)",
"def new_param_energy_vac(coords, params, T=293.15):\n\n #-------------------\n # CONSTANTS\n #-------------------\n kB = 0.0083145 #Boltzmann constant (Gas constant) in kJ/(mol*K)\n beta = 1/(kB*T)\n\n #-------------------\n # PARAMETERS\n #-------------------\n params = params\n \n # Determine number of states we wish to estimate potential energies for\n mols = []\n for i in params:\n mols.append(i)\n mol = '../monomers/'+mols[0]+'.mol2'\n K = len(params[mols[0]].keys())\n\n\n #-------------\n # SYSTEM SETUP\n #-------------\n verbose = False # suppress echos from OEtoolkit functions\n ifs = oechem.oemolistream(mol)\n mol = oechem.OEMol()\n # This uses parm@frosst atom types, so make sure to use the forcefield-flavor reader\n flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield\n ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)\n oechem.OEReadMolecule(ifs, mol )\n # Perceive tripos types\n oechem.OETriposAtomNames(mol)\n\n # Load forcefield file\n #ffxml = 'smirnoff99Frosst_with_AllConstraints.ffxml'#\n #print('The forcefield being used is smirnoff99Frosst_with_AllConstraints.ffxml')\n ffxml = get_data_filename('forcefield/smirnoff99Frosst.ffxml')\n print('The forcefield being used is smirnoff99Frosst.ffxml')\n\n ff = ForceField(ffxml)\n\n # Generate a topology\n topology = generateTopologyFromOEMol(mol)\n\n #-----------------\n # MAIN\n #-----------------\n\n # Calculate energies\n\n E_kn = np.zeros([K,len(coords)],np.float64)\n u_kn = np.zeros([K,len(coords)],np.float64)\n for i,j in enumerate(params):\n AlkEthOH_id = j\n for k,l in enumerate(params[AlkEthOH_id]):\n print(\"Anotha one\")\n for m,n in enumerate(params[AlkEthOH_id][l]):\n newparams = ff.getParameter(smirks=n[0]) \n newparams[n[1]]=n[2]\n ff.setParameter(newparams,smirks=n[0])\n system = ff.createSystem(topology, [mol])\n #print(newparams)\n for o,p in enumerate(coords):\n e = get_energy_vac(system,p)\n E_kn[k,o] = e._value\n u_kn[k,o] = e._value*beta\n\n\n return E_kn,u_kn",
"def electric_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j * kr\n\n front_term = (\n (1j * self.omega * self.mu * self.moment) / (4. * np.pi * r**2) *\n (ikr + 1) * np.exp(-ikr)\n )\n return front_term * self.cross_orientation(dxyz) / r",
"def evaluate(self, energy, **kwargs):\n\n rotation = kwargs.pop(\"rotation\")\n tmax = kwargs.pop(\"tmax\")\n smooth = kwargs.pop(\"smooth\", True)\n bias = kwargs.pop(\"bias\")\n\n # change max delay time\n if not tmax == self.cascmap.tmax:\n self.cascmap.tmax = tmax\n\n # change rotation angle\n # and apply rotation\n if not rotation == self.cascmap.angle:\n self.cascmap.angle = rotation\n\n # calculate flux from observed point source specturm\n # first the ebl contribution\n # and remove parameters from kwargs that belong to the EBL model\n kwargs_ebl = {}\n for k in self._ebl.parameters.names:\n kwargs_ebl[k] = kwargs.pop(k)\n\n result = self._ebl.evaluate(energy * (1. + bias), **kwargs_ebl)\n result *= self._intrinsic_spectral_model.evaluate(energy * (1. + bias), **kwargs)\n\n # change spectral weights\n self.cascmap.apply_spectral_weights(injspec=self._inj_spec,\n smooth=smooth,\n force_recompute=True,\n **kwargs)\n\n is_cached_coord = [\n _ is coord for _, coord in zip(energy, self._cached_coordinates)\n ]\n\n # reset cache\n if not np.all(is_cached_coord):\n self._cached_value = None\n\n if self._cached_weights is not None and \\\n not np.all(np.equal(self.cascmap.weights, self._cached_weights)):\n self._cached_weights = None\n\n if self._cached_value is None or self._cached_weights is None:\n self._cached_coordinates = energy\n self._cached_value = self._interpolate(energy * (1. + bias))\n self._cached_weights = self.cascmap.weights\n\n if self.add_primary:\n result += self._cached_value.to(result.unit)\n else:\n result = self._cached_value.to(result.unit)\n\n return result",
"def ComputeEnergyConsumption(self):\r\n pass",
"def calc_variables ( ):\n\n # In this example we simulate using the shifted-force potential only\n # The values of < p_sf >, < e_sf > and density should be consistent (for this potential)\n # There are no long-range or delta corrections\n\n from averages_module import VariableType\n \n # Preliminary calculations\n vol = box**3 # Volume\n rho = n / vol # Density\n\n # Variables of interest, of class VariableType, containing three attributes:\n # .val: the instantaneous value\n # .nam: used for headings\n # .method: indicating averaging method\n # If not set below, .method adopts its default value of avg\n # The .nam and some other attributes need only be defined once, at the start of the program,\n # but for clarity and readability we assign all the values together below\n\n # Move acceptance ratio\n m_r = VariableType ( nam = 'Move ratio', val = m_ratio, instant = False )\n\n # Internal energy per molecule (shifted-force potential)\n # Ideal gas contribution (assuming nonlinear molecules) plus total PE divided by N\n e_sf = VariableType ( nam = 'E/N shifted force', val = 3.0*temperature + total.pot/n )\n\n # Pressure (shifted-force potential)\n # Ideal gas contribution plus total virial divided by V\n p_sf = VariableType ( nam = 'P shifted force', val = rho*temperature + total.vir/vol )\n\n # Collect together into a list for averaging\n return [ m_r, e_sf, p_sf ]",
"def eval_calc(self):\n\n # Define the GmfGetter\n\n #for args_tag in range(len(self.args)-1):\n # Looping over all source models (Note: the last attribute in self.args is a monitor - so skipping it)\n \n from openquake.calculators import getters\n from openquake.baselib import general\n from openquake.hazardlib import const, calc, gsim\n from openquake.commands import dbserver as cdbs\n if self.vtag >= 12:\n from openquake.hazardlib.const import StdDev\n if self.vtag >= 12:\n from openquake.commonlib import datastore\n else:\n from openquake.baselib import datastore\n\n cur_getter = getters.GmfGetter(self.args[0][0], calc.filters.SourceFilter(\n self.dstore['sitecol'], self.dstore['oqparam'].maximum_distance), \n self.calculator.param['oqparam'], self.calculator.param['amplifier'], \n self.calculator.param['sec_perils'])\n\n # Evaluate each computer\n print('FetchOpenQuake: Evaluting ground motion models.')\n for computer in cur_getter.gen_computers(self.mon):\n # Looping over rupture(s) in the current realization\n sids = computer.sids\n #print('eval_calc: site ID sids = ')\n #print(sids)\n eids_by_rlz = computer.ebrupture.get_eids_by_rlz(\n cur_getter.rlzs_by_gsim)\n mag = computer.ebrupture.rupture.mag\n im_list = []\n data = general.AccumDict(accum=[])\n cur_T = self.event_info['IntensityMeasure'].get('Periods', None)\n for cur_gs, rlzs in cur_getter.rlzs_by_gsim.items():\n # Looping over GMPE(s)\n #print('eval_calc: cur_gs = ')\n #print(cur_gs)\n num_events = sum(len(eids_by_rlz[rlz]) for rlz in rlzs)\n if num_events == 0: # it may happen\n continue\n # NB: the trick for performance is to keep the call to\n # .compute outside of the loop over the realizations;\n # it is better to have few calls producing big arrays\n tmpMean = []\n tmpstdtot = []\n tmpstdinter = []\n tmpstdintra = []\n if self.vtag >= 12:\n mean_stds_all = computer.cmaker.get_mean_stds([computer.ctx], StdDev.EVENT)[0]\n for imti, imt in enumerate(computer.imts): \n # Looping over IM(s)\n #print('eval_calc: imt = ', imt)\n if str(imt) in ['PGA', 'PGV', 'PGD']:\n cur_T = [0.0]\n im_list.append(str(imt))\n imTag = 'ln' + str(imt)\n else:\n if 'SA' not in im_list:\n im_list.append('SA')\n imTag = 'lnSA'\n if isinstance(cur_gs, gsim.multi.MultiGMPE):\n gs = cur_gs[str(imt)] # MultiGMPE\n else:\n gs = cur_gs # regular GMPE\n try:\n if self.vtag >= 12:\n mean_stds = mean_stds_all[:, imti]\n num_sids = len(computer.sids)\n num_stds = len(mean_stds)\n if num_stds == 1:\n # no standard deviation is available\n # for truncation_level = 0 there is only mean, no stds\n if computer.correlation_model:\n raise ValueError('truncation_level=0 requires '\n 'no correlation model')\n mean = mean_stds[0]\n stddev_intra = 0\n stddev_inter = 0\n stddev_total = 0\n if imti == 0:\n tmpMean = mean\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = stddev_total\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=0)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot, stddev_total), axis=0)\n elif num_stds == 2:\n # If the GSIM provides only total standard deviation, we need\n # to compute mean and total standard deviation at the sites\n # of interest.\n # In this case, we also assume no correlation model is used.\n # By default, we evaluate stddev_inter as the stddev_total\n\n if self.correlation_model:\n raise 
CorrelationButNoInterIntraStdDevs(\n self.correlation_model, gsim)\n\n mean, stddev_total = mean_stds\n stddev_total = stddev_total.reshape(stddev_total.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n stddev_inter = stddev_total\n stddev_intra = 0\n if imti == 0:\n tmpMean = mean\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = stddev_total\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=0)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot, stddev_total), axis=0)\n else:\n mean, stddev_inter, stddev_intra = mean_stds\n stddev_intra = stddev_intra.reshape(stddev_intra.shape + (1, ))\n stddev_inter = stddev_inter.reshape(stddev_inter.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n if imti == 0:\n tmpMean = mean\n tmpstdinter = stddev_inter\n tmpstdintra = stddev_intra\n tmpstdtot = np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=1)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot,np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)), axis=1)\n\n elif self.vtag == 11:\n # v11\n dctx = computer.dctx.roundup(\n cur_gs.minimum_distance)\n if computer.distribution is None:\n if computer.correlation_model:\n raise ValueError('truncation_level=0 requires '\n 'no correlation model')\n mean, _stddevs = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt, stddev_types=[])\n num_sids = len(computer.sids)\n if cur_gs.DEFINED_FOR_STANDARD_DEVIATION_TYPES == {const.StdDev.TOTAL}:\n # If the GSIM provides only total standard deviation, we need\n # to compute mean and total standard deviation at the sites\n # of interest.\n # In this case, we also assume no correlation model is used.\n if computer.correlation_model:\n raise CorrelationButNoInterIntraStdDevs(\n computer.correlation_model, cur_gs)\n\n mean, [stddev_total] = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt, [const.StdDev.TOTAL])\n stddev_total = stddev_total.reshape(\n stddev_total.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n if imti == 0:\n tmpMean = mean\n tmpstdtot = stddev_total\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=0)\n tmpstdtot = np.concatenate((tmpstdtot, stddev_total), axis=0)\n else:\n mean, [stddev_inter, stddev_intra] = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt,\n [const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT])\n stddev_intra = stddev_intra.reshape(\n stddev_intra.shape + (1, ))\n stddev_inter = stddev_inter.reshape(\n stddev_inter.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n\n if imti == 0:\n tmpMean = mean\n tmpstdinter = stddev_inter\n tmpstdintra = stddev_intra\n tmpstdtot = np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=1)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot,np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)), axis=1)\n\n else:\n # v10\n dctx = computer.dctx.roundup(\n cur_gs.minimum_distance)\n if 
computer.truncation_level == 0:\n if computer.correlation_model:\n raise ValueError('truncation_level=0 requires '\n 'no correlation model')\n mean, _stddevs = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt, stddev_types=[])\n num_sids = len(computer.sids)\n if cur_gs.DEFINED_FOR_STANDARD_DEVIATION_TYPES == {const.StdDev.TOTAL}:\n # If the GSIM provides only total standard deviation, we need\n # to compute mean and total standard deviation at the sites\n # of interest.\n # In this case, we also assume no correlation model is used.\n if computer.correlation_model:\n raise CorrelationButNoInterIntraStdDevs(\n computer.correlation_model, cur_gs)\n\n mean, [stddev_total] = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt, [const.StdDev.TOTAL])\n stddev_total = stddev_total.reshape(\n stddev_total.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n if imti == 0:\n tmpMean = mean\n tmpstdtot = stddev_total\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=0)\n tmpstdtot = np.concatenate((tmpstdtot, stddev_total), axis=0)\n else:\n mean, [stddev_inter, stddev_intra] = cur_gs.get_mean_and_stddevs(\n computer.sctx, computer.rctx, dctx, imt,\n [const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT])\n stddev_intra = stddev_intra.reshape(\n stddev_intra.shape + (1, ))\n stddev_inter = stddev_inter.reshape(\n stddev_inter.shape + (1, ))\n mean = mean.reshape(mean.shape + (1, ))\n\n if imti == 0:\n tmpMean = mean\n tmpstdinter = stddev_inter\n tmpstdintra = stddev_intra\n tmpstdtot = np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)\n else:\n tmpMean = np.concatenate((tmpMean, mean), axis=1)\n tmpstdinter = np.concatenate((tmpstdinter, stddev_inter), axis=1)\n tmpstdintra = np.concatenate((tmpstdintra, stddev_intra), axis=1)\n tmpstdtot = np.concatenate((tmpstdtot,np.sqrt(stddev_inter * stddev_inter + stddev_intra * stddev_intra)), axis=1)\n\n except Exception as exc:\n raise RuntimeError(\n '(%s, %s, source_id=%r) %s: %s' %\n (gs, imt, computer.source_id.decode('utf8'),\n exc.__class__.__name__, exc)\n ).with_traceback(exc.__traceback__)\n\n # initialize\n # NOTE: needs to be extended for gmpe logic tree\n gm_collector = []\n # collect data\n for k in range(tmpMean.shape[0]):\n imResult = {}\n if len(tmpMean):\n imResult.update({'Mean': [float(x) for x in tmpMean[k].tolist()]})\n if len(tmpstdtot):\n imResult.update({'TotalStdDev': [float(x) for x in tmpstdtot[k].tolist()]})\n if len(tmpstdinter):\n imResult.update({'InterEvStdDev': [float(x) for x in tmpstdinter[k].tolist()]})\n if len(tmpstdintra):\n imResult.update({'IntraEvStdDev': [float(x) for x in tmpstdintra[k].tolist()]})\n gm_collector.append({imTag: imResult})\n #print(gm_collector)\n \n # close datastore instance\n self.calculator.datastore.close()\n \n # stop dbserver\n if self.vtag >= 11:\n cdbs.main('stop')\n else:\n cdbs.dbserver('stop')\n \n # terminate the subprocess\n if self.prc:\n self.prc.kill()\n\n # copy calc hdf file\n if self.vtag >= 11:\n calc_id = datastore.get_last_calc_id()\n path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id)\n else:\n path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % self.calc_id)\n\n if self.dir_info:\n dir_output = self.dir_info['Output']\n try:\n shutil.copy2(path, dir_output)\n print('FetchOpenQuake: calc hdf file saved.')\n except:\n print('FetchOpenQuake: failed to copy calc hdf file.')\n\n # Final results\n res = {'Magnitude': mag,\n 'Periods': cur_T,\n 'IM': im_list,\n 'GroundMotions': 
gm_collector}\n \n # return\n return res",
"def make_energy(self):\n @nb.njit\n def energy_func(m):\n heff = self.field(m)\n return -energy.zeeman(m, self.Ms, heff) \\\n + energy.shape_anisotropy(m, self.Ms, self.Nd[0], self.Nd[1], self.Nd[2]) \\\n + energy.uniaxial_anisotropy(m, self.u, self.Ku1, self.Ku2) \\\n + energy.cubic_anisotropy(m, self.c1, self.c2, self.c3,\n self.Kc1, self.Kc2, self.Kc3)\n self.energy = energy_func",
"def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")",
"def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)",
"def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])",
"def nonlinear_electroelastodynamics(optimise=True):\n\n mesh = Mesh()\n mesh.Parallelepiped(upper_right_front_point=(1,1,0.001),nx=10,ny=10,nz=1, element_type=\"hex\")\n\n mu = 5.0e4\n mu1 = mu\n mu2 = mu\n eps_2 = 4.0*8.8541e-12\n v = 0.4\n lamb = 2.*mu*v/(1-2.*v)\n material = IsotropicElectroMechanics_108(3, mu1=mu1, mu2=mu2, lamb=lamb, eps_2=eps_2, rho=1200.)\n\n formulation = DisplacementPotentialFormulation(mesh)\n\n\n def dirichlet_function(mesh):\n\n boundary_data = np.zeros((mesh.points.shape[0],4))+np.NAN\n\n Z_0 = np.logical_and(np.isclose(mesh.points[:,0],0.),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,1],0.),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,0],1),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,1],1),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n\n Z_0 = np.isclose(mesh.points[:,2],0.)\n boundary_data[Z_0,3] = 0.\n\n Z_0 = np.isclose(mesh.points[:,2],.001)\n boundary_data[Z_0,3] = 9e3\n\n return boundary_data\n\n boundary_condition = BoundaryCondition()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n nonlinear_static_solver = FEMSolver(total_time=60.,\n number_of_load_increments=25,\n analysis_nature=\"nonlinear\",\n analysis_type=\"static\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n )\n\n nonlinear_static_results = nonlinear_static_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n nonlinear_dynamic_solver = FEMSolver(total_time=60.,\n number_of_load_increments=250,\n analysis_nature=\"nonlinear\",\n analysis_type=\"dynamic\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n compute_energy_dissipation=True,\n compute_linear_momentum_dissipation=True,\n )\n\n nonlinear_dynamic_results = nonlinear_dynamic_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n # boundary_condition.__reset_state__()\n # boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n # nonlinear_dynamic_solver_exp = FEMSolver(total_time=6.,\n # number_of_load_increments=200000,\n # save_frequency=200000,\n # analysis_nature=\"nonlinear\",\n # analysis_type=\"dynamic\",\n # analysis_subtype=\"explicit\",\n # newton_raphson_tolerance=1e-5,\n # newton_raphson_solution_tolerance=1e-11,\n # optimise=optimise,\n # print_incremental_log=True,\n # )\n\n # nonlinear_dynamic_results_exp = nonlinear_dynamic_solver_exp.Solve(formulation=formulation, mesh=mesh,\n # material=material, boundary_condition=boundary_condition)\n\n\n boundary_condition.__reset_state__()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n linear_static_solver = FEMSolver(total_time=60.,\n number_of_load_increments=250,\n analysis_nature=\"linear\",\n analysis_type=\"static\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n )\n\n linear_static_results = linear_static_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n boundary_condition.__reset_state__()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n 
linear_dynamic_solver = FEMSolver(total_time=60.,\n number_of_load_increments=1000,\n analysis_nature=\"linear\",\n analysis_type=\"dynamic\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n break_at_increment=100,\n )\n\n linear_dynamic_results = linear_dynamic_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n s1 = nonlinear_static_results.GetSolutionVectors()\n s2 = nonlinear_dynamic_results.GetSolutionVectors()\n # s3 = nonlinear_dynamic_results_exp.GetSolutionVectors()\n s4 = linear_static_results.GetSolutionVectors()\n s5 = linear_dynamic_results.GetSolutionVectors()\n\n norm = lambda x: np.linalg.norm(x[:,2,-1])\n assert norm(s1) > 0.13 and norm(s1) < 0.15\n assert norm(s2) > 0.13 and norm(s2) < 0.15\n assert norm(s4) > 0.13 and norm(s4) < 0.15"
] | [
"0.6979177",
"0.6731371",
"0.6482822",
"0.64128745",
"0.6365258",
"0.6362616",
"0.63293177",
"0.62808794",
"0.6273513",
"0.62004024",
"0.6196962",
"0.6111015",
"0.60516304",
"0.60275024",
"0.60095674",
"0.59755975",
"0.596262",
"0.59490305",
"0.59482163",
"0.59235406",
"0.5885458",
"0.58756685",
"0.5863213",
"0.58623356",
"0.58459187",
"0.5842846",
"0.58278894",
"0.58210653",
"0.58198273",
"0.57964206"
] | 0.72647285 | 0 |
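A minimal usage sketch for _evaluate_electric above — assumptions: NumPy imported as np, and a stand-in snapshot object (hypothetical, mimicking a HOOMD-style snapshot) exposing particles.position and particles.charge arrays.

    import numpy as np
    from types import SimpleNamespace

    # Two point charges in a uniform field along x (all names below are illustrative).
    snapshot = SimpleNamespace(particles=SimpleNamespace(
        position=np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]),
        charge=np.array([1.0, -1.0])))
    E_field = np.array([0.5, 0.0, 0.0])

    forces, energies = _evaluate_electric(snapshot, E_field)
    # forces[i] == charge[i] * E_field ; energies[i] == -charge[i] * dot(position[i], E_field)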
Assert the params of the external object match what's in the dict. | def _assert_correct_params(external_obj, param_attr, params):
    # Parameters supplied as a dict: compare each named entry for particle type 'A'.
    if type(params) == dict:
        for param in params.keys():
            npt.assert_allclose(
                getattr(external_obj, param_attr)['A'][param], params[param])
    # Parameters supplied as a tuple: compare the whole tuple at once.
    if type(params) == tuple:
        npt.assert_allclose(getattr(external_obj, param_attr)['A'], params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _assert_params_equals(self, url, param_dict):\n url_params = url_get_params(url)\n assert_equals(url_params, param_dict)",
"def test_validate_params(mocker, params):\n validate_params(**params)",
"def _assertParams(self) -> None:\n params = parse_qs(self.http_client.request.call_args[1][\"data\"].decode(\"utf-8\"))\n self.assertEqual(params[\"token\"], [\"mockAccessToken\"])\n self.assertEqual(params[\"client_id\"], [CLIENT_ID])\n self.assertEqual(params[\"client_secret\"], [CLIENT_SECRET])",
"def inner_test(param: dict):\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})",
"def check_params(params):\n\n required = ['initlandmarks']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)",
"def validate_params(self, params: Dict[str, Any]) -> bool:\n dict_set_defaults(params, self.DEFAULT_PARAMS)\n\n for k in self.params:\n if k in {\"name\", \"descr\", \"cache_file\"}:\n continue\n\n if self.params[k] != params.get(k):\n return False\n\n return True",
"def test_valid_analysis_request(analysis_request_dict: JSONDict) -> None:\n\n request = AnalysisRequest(**analysis_request_dict)\n\n assert request.dict() == analysis_request_dict",
"def test_good_custom_params(self):\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\n self.xmodule.oauth_params = Mock()\n self.xmodule.get_input_fields()\n self.xmodule.oauth_params.assert_called_with(\n {'custom_test_custom_params': 'test_custom_param_value'},\n 'test_client_key', 'test_client_secret'\n )",
"def _check_params(self):\n pass",
"def test_good_custom_params(self):\r\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n self.xmodule.get_input_fields()\r\n self.xmodule.oauth_params.assert_called_with(\r\n {u'custom_test_custom_params': u'test_custom_param_value'},\r\n 'test_client_key', 'test_client_secret'\r\n )",
"def check_params(info_dict):\n # check the info_dict\n if not isinstance(info_dict, dict):\n raise TypeError(\"info_dict should be dict, but the input is %s\" % \\\n type(info_dict))\n\n # check the op_type info\n if \"op_type\" not in info_dict.keys():\n raise KeyError(\"the keyword 'op_type' is missing in input params\")",
"def verifyData(self, expectedDict):\n pass",
"def test_compose_params(self):\n filter = Bleach(**self.params)\n self.assertEquals(self.params, filter.bleach_params)",
"def _assert_hook_call_record_has_expected_parameters(\n call_record: logging.LogRecord, expected_parameters: List[str]\n):\n for param in expected_parameters:\n assert hasattr(call_record, param)",
"def _validate_params(self):\n assert set(self.required_params) - set(self._params) == set()\n for par, val in self.optional_params.items():\n if par not in self._params:\n self._params[par] = val",
"def check_params(params):\n assert 'split' in params.keys(\n ), 'Params must include split (train, val, or test).'\n\n required = ['batch_size', 'im_shape']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)",
"def check_params(params):\n assert 'split' in params.keys(\n ), 'Params must include split (train, val, or test).'\n\n required = ['batch_size', 'root', 'im_shape']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)",
"def validate_params(params, expected, opt_param=set()):\n expected = set(expected)\n opt_param = set(opt_param)\n pkeys = set(params)\n if expected - pkeys:\n raise ValueError(\"Required keys {} not in supplied parameters\"\n .format(\", \".join(expected - pkeys)))\n defined_param = expected | opt_param\n for param in params:\n if param not in defined_param:\n logger.warning(\"Unexpected parameter {} supplied\".format(param))",
"def validate_params(params, expected, opt_param=set()):\n expected = set(expected)\n opt_param = set(opt_param)\n pkeys = set(params)\n if expected - pkeys:\n raise ValueError(\"Required keys {} not in supplied parameters\"\n .format(\", \".join(expected - pkeys)))\n defined_param = expected | opt_param\n for param in params:\n if param not in defined_param:\n logging.warning(\"Unexpected parameter {} supplied\".format(param))",
"def check_params_equal(param1, param2):\n for key, val in param1.items():\n if np.any(param1[key] != param2[key]):\n return False\n return True",
"def assert_common_params(self, params, action=None):\n if action:\n assert params[\"Action\"] == action\n\n assert params[\"AWSAccessKeyId\"] == TEST_MWS_ACCESS_KEY\n assert params[self.api_class.ACCOUNT_TYPE] == TEST_MWS_ACCOUNT_ID\n if \"MWSAuthToken\" in params:\n assert params[\"MWSAuthToken\"] == TEST_MWS_AUTH_TOKEN\n\n # Signature keys (below) are defined with string literals in MWS.get_params\n # If test fails here, check that method.\n assert params[\"SignatureMethod\"] == \"HmacSHA256\"\n assert params[\"SignatureVersion\"] == \"2\"\n isoformat_str = \"%Y-%m-%dT%H:%M:%S\"\n try:\n datetime.datetime.strptime(params[\"Timestamp\"], isoformat_str)\n except ValueError:\n pytest.fail(\n \"Timestamp expected an ISO-8601 datetime string url encoded\"\n \" with format [YYYY-MM-DDTHH:MM:SS].\"\n )",
"def check_params(params):\n\n required = ['gtsrb_train_root', 'gtsrb_test_root', 'batch_size']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)",
"def check_params(cls, **kwargs) -> None:\n\n for key, val in kwargs.items():\n cls.check_param(key, val)",
"def test_map_params(self):\n assert self.route.map_params(id=\"user_id\").route[\"map_params\"] == {\"id\": \"user_id\"}",
"def check_params(self, model_params):\n return model_params",
"def test_request_params():\n ctxt = SqContextMock('key', 'http', 'rest-ip', 80)\n sqobj = SqObjMock(ctxt, 'default', 'default', 'default',\n 'default', 'default', 'default', 'default')\n engine = SqRestEngine(sqobj)\n # paramters which will override engine internal paramters\n sqobj_override_params = ['hostname', 'namespace', 'view']\n # other parameters\n other_params = ['other_param_0', 'other_param_1']\n\n testing_params = sqobj_override_params + other_params\n # try all combinations of params\n for n_sq_params in range(1, len(testing_params)+1):\n for sq_params in combinations(testing_params, n_sq_params):\n req_params = {p: 'override' for p in sq_params}\n validate_args(engine, req_params)",
"def test_accepts_dictionary(self):\n self.Test.scope('foo', {'where': 'foo'})\n self.assertEqual(self.Test.foo().params['where'], ['foo'])",
"def test_build_params( self ):\n r = Requester( self.logger )\n ( partnership_id, authorization_id, pickup_location, search_key, search_value ) = ( 'a', 'b', 'c', 'd', 'e' )\n params = r.build_params( partnership_id, authorization_id, pickup_location, search_key, search_value )\n self.assertEqual(\n ['ExactSearch', 'Notes', 'PartnershipId', 'PickupLocation'],\n sorted(params.keys()) )",
"def test_task_params(self):\n from sosbeacon.event.message import get_contact_broadcast_task\n\n student_key = Mock()\n student_key.urlsafe.return_value = \"ASTUDENTKEY\"\n\n event_key = Mock()\n event_key.urlsafe.return_value = \"ANEVENTKEY\"\n\n message_key = Mock()\n message_key.urlsafe.return_value = \"SOMEMESSAGEKEY\"\n\n batch_id = \"THEBATCHID\"\n\n contact = {\n 'name': 'Johny Jones',\n 'methods': (\n {'type': 't', 'value': '1234567890'},\n {'type': 'e', 'value': '[email protected]'},\n )\n }\n\n task = get_contact_broadcast_task(\n event_key, message_key, student_key, contact, batch_id)\n\n check_params = {\n 'student': 'ASTUDENTKEY',\n 'event': 'ANEVENTKEY',\n 'message': 'SOMEMESSAGEKEY',\n 'batch': 'THEBATCHID',\n 'contact': json.dumps(contact),\n }\n self.assertEqual(check_params, task.extract_params())",
"def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')"
] | [
"0.7194935",
"0.7190007",
"0.6861588",
"0.6417681",
"0.6358602",
"0.6349058",
"0.6329792",
"0.63230383",
"0.6288156",
"0.62793636",
"0.6263622",
"0.6222141",
"0.61963415",
"0.61763614",
"0.61737525",
"0.61573094",
"0.6152412",
"0.61353236",
"0.6125796",
"0.6115335",
"0.61032295",
"0.6066115",
"0.6054544",
"0.6050753",
"0.6045273",
"0.6038042",
"0.6011698",
"0.60053843",
"0.5979012",
"0.5942498"
] | 0.7591457 | 0 |
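A minimal usage sketch for _assert_correct_params above — assumptions: npt is numpy.testing, and the external object (hypothetical) stores its parameters in a dict keyed by particle type 'A'.

    import numpy.testing as npt
    from types import SimpleNamespace

    # Dict form: every named parameter for type 'A' is compared individually.
    obj = SimpleNamespace(params={'A': {'epsilon': 1.0, 'sigma': 0.5}})
    _assert_correct_params(obj, 'params', {'epsilon': 1.0, 'sigma': 0.5})

    # Tuple form: the stored tuple is compared in one call.
    obj = SimpleNamespace(params={'A': (1.0, 0.5)})
    _assert_correct_params(obj, 'params', (1.0, 0.5))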
Get documentation string for this configuration | def get_doc_string(self) -> str:
r = "Undocumented"
if self.doc is not None: r = self.doc
return r | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def documentation(self) -> str:\n return pulumi.get(self, \"documentation\")",
"def doc(self):\n doc = self.get('doc')\n if doc:\n from .config import defaults\n return defaults.types.doc(doc)",
"def documentation(self):\n return self.handle.__doc__",
"def getDoc(self):\r\n return self.__doc__",
"def documentation(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"documentation\")",
"def description(self):\n return (self.__doc__ or \"\").strip()",
"def DocString():\n return",
"def get_description(cls) -> str:\n return cls.__doc__ or \"\"",
"def get_doc(self) -> Documentation:\n r : Documentation = [self.get_doc_string()]\n r_src = \"\"\n if hasattr(self,\"_path\"): r_src += \"locally at '%s'\" % (str(self._path))\n if self.url is not None: r_src += \" remote url(orig) '%s'\" % (self.url)\n r_src += \" remote url(parsed) '%s'\" % (self.git_url.as_string())\n if self.branch is not None: r_src += \" branch '%s'\" % (self.branch)\n r.append(r_src)\n r_stages = []\n for (sn,s) in self.stages.items():\n r_stages.append(sn)\n pass\n r_stages.sort()\n if len(r_stages)>0:\n r.append(\"Stages: %s\"%(\" \".join(r_stages)))\n pass\n return r",
"def documentation_url(self) -> str:\n return self._documentation_url",
"def description(cls) -> str:\n\n return cls.__doc__ or \"\"",
"def __repr__(self) -> str:\n return f\"<Doc[{self.desc}]>\"",
"def get_main_help(self):\r\n return __doc__.strip()",
"def documentation():\n return auto.html()",
"def rawDoc(self):\n return self.namespace[\"__doc__\"]",
"def __str__(self):\n return self.__class__.__name__ + '\\n' + self.__class__.__doc__",
"def get_command_docstring(self, command):\n return self.get_catalog(command).udocstring",
"def docstring(self) -> str:\n out = f\"{self.model.__module__}.{self.model.__qualname__}\"\n docstring = inspect.getdoc(self.model)\n if docstring:\n out += \"\\n\\n\" + docstring\n return out",
"def __doc__(self, ???):",
"def description(self) -> ConfigNodePropertyString:\n return self._description",
"def describe(self) -> str:",
"def name(self):\n return self.__doc__.split('\\n')[0]",
"def raw_doc(self):\n try:\n return str(self.definition.docstr)\n except AttributeError:\n return ''",
"def shortDescription(self):\n # Suppress default logging of docstrings.\n return None",
"def help(cls):\n return textwrap.dedent(cls.__doc__).strip()",
"def get_description(self):\n class_docs = self._clean_docs(get_view_description(self.callback))\n method_docs = self._clean_docs(formatting.dedent(smart_text(self.get_docs())))\n\n if self.yaml_parser.get_param('replace_docs', False):\n docstring_body = method_docs\n else:\n docstring_body = \"\\n\\n\".join([docstring for docstring in\n [class_docs, method_docs] if docstring])\n\n explicit_docs = self.yaml_parser.get_param(\"docs\", None)\n if explicit_docs is not None:\n docstring_body = explicit_docs.format(super=docstring_body)\n\n return docstring_body.strip()",
"def documentation():\n return render_template('help.html')",
"def __doc__(self):\n return self.fget.__doc__",
"def description(self) -> str:\n return self.doc.get('description', '')",
"def get_description(self):\n return re.sub('\\n\\W+',' ', self.__doc__)"
] | [
"0.8267782",
"0.7552703",
"0.7545474",
"0.7456952",
"0.73271036",
"0.7129974",
"0.7098139",
"0.7057664",
"0.7055784",
"0.70394516",
"0.68911743",
"0.684487",
"0.68445027",
"0.6841313",
"0.68265826",
"0.67644143",
"0.6692945",
"0.66825646",
"0.6674594",
"0.6660667",
"0.6659072",
"0.66508627",
"0.6632637",
"0.66204166",
"0.66079324",
"0.6606326",
"0.6589629",
"0.65855235",
"0.6575949",
"0.6544503"
] | 0.7629708 | 1 |
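A minimal sketch of the get_doc_string behaviour above — assumption: a configuration-like object (hypothetical) whose doc attribute may be None, with get_doc_string available as a plain function.

    from types import SimpleNamespace

    cfg = SimpleNamespace(doc=None)
    assert get_doc_string(cfg) == "Undocumented"      # fallback when no doc is set

    cfg.doc = "Builds the FPGA bitstream"
    assert get_doc_string(cfg) == "Builds the FPGA bitstream"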
Resolve the strings in the repo description and its stages, using the repo configuration's environment. If resolve_fully is True then all of the environment must resolve; if resolve_fully is False then the URL, path and branch must resolve. | def resolve(self, env:GripEnv, resolve_fully:bool=True, error_handler:ErrorHandler=None) -> None:
self.grip_repo_desc.base.add_log_string("Resolve repo '%s' in config '%s'"%(self.name, self.grip_config.name))
self.env = GripEnv(name="repo %s"%self.name, parent=env)
self.env.build_from_values(self.values.env)
url = self.env.substitute(self.values.url, finalize=True, error_handler=error_handler)
if url is None:
raise GripTomlError("for repo '%s' has unknown url '%s'"%(self.name, self.values.url))
self.url = url
try:
self.git_url = GitUrl(self.url)
pass
except:
raise GripTomlError("for repo '%s' could not parse git url '%s'"%(self.name, self.url))
self.branch = self.env.substitute(self.values.branch, finalize=True, error_handler=error_handler)
self._path = Path(self.git_url.repo_name)
if self.values.path is not None:
self._path = Path(self.env.substitute(self.values.path, finalize=True, error_handler=error_handler))
pass
if self.values.shallow is None:
self.shallow = False
pass
else:
self.shallow = self.values.shallow
pass
self.doc = self.values.doc
self.env.add_values({"GRIP_REPO_PATH":"@GRIP_ROOT_PATH@/"+str(self._path)})
if resolve_fully:
self.env.resolve(error_handler=error_handler)
for (n,s) in self.stages.items():
s.resolve(self.env, error_handler=error_handler)
pass
pass
# print("Resolve %s:%s:%s:%s"%(self,self.name,self.url,self.git_url))
self._is_resolved = True
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve_ref(ref):\n if ref == DIRTY:\n return ref\n try:\n return git_rev_parse(ref)\n except CommandFailure:\n for remote in git_remote():\n try:\n return git_rev_parse('{remote}/{ref}'.format(**locals()))\n except CommandFailure:\n continue\n return None",
"def resolve_references(self):\n self.specs = self._resolve_partial(self.parsed_url, self.specs, ())",
"def _prepare_manual_resolve(self):\n # Files that have been deleted between branch and cherry-pick will not have\n # their skip-worktree bit set so set it manually for those files to avoid\n # git status incorrectly listing them as unstaged deletes.\n repo_status = self._run_git_command(\n ['-c', 'core.quotePath=false', 'status', '--porcelain']).splitlines()\n extra_files = [f[3:] for f in repo_status if f[:2] == ' D']\n if extra_files:\n self._run_git_command_with_stdin(\n ['update-index', '--skip-worktree', '--stdin'],\n stdin='\\n'.join(extra_files) + '\\n')",
"def parse_resolve(cls, url):\n loc = cls.parse(url)\n if loc.path and loc.path != '/':\n # If true ref name contains slash, a prefix of path might be a suffix of\n # ref. Try to resolve it.\n ref_prefix = None\n if loc.treeish.startswith('refs/'):\n ref_prefix = loc.treeish + '/'\n refs = get_refs(loc.hostname, loc.project, ref_prefix)\n if not refs:\n raise TreeishResolutionError('could not resolve treeish in %s' % url)\n\n treeishes = set(refs.keys())\n # Add branches and tags without a prefix.\n for ref in refs:\n for prefix in ('refs/tags/', 'refs/heads/'):\n if ref.startswith(prefix):\n treeishes.add(ref[len(prefix):])\n break\n loc = cls.parse(url, treeishes=treeishes)\n return loc",
"def do_resolve(self,args):\n try:\n for solution in self.resolve_all(args):\n self.print_solution(solution)\n except:\n traceback.print_exc(file=sys.stdout)",
"def _resolve(self):\n pass",
"def resolve(self, *args):\n return _libsbml.SBMLResolverRegistry_resolve(self, *args)",
"def parse_artifact_resolve(self, txt, **kwargs):\n\n _resp = parse_soap_enveloped_saml_artifact_resolve(txt)\n return artifact_resolve_from_string(_resp)",
"def resolve(self):\n pass # pragma: no cover",
"def resolve(self, *args):\n return _libsbml.SBMLResolver_resolve(self, *args)",
"def resolve(self,**bindings):\n for solution in self.resolve_all(**bindings):\n return solution",
"def resolve_path(self, path):\n\n return (\n self.resolve_root(path) or\n self.resolve_tree(path) or\n self.resolve_ref(path) or\n self.resolve_ref_hierarchy(path) or\n self.resolve_repository_entry(path)\n )",
"def resolve(name, env):\n t = name\n while t in env:\n t = env[t]\n return t",
"def resolve(self, *args):\n return _libsbml.SBMLFileResolver_resolve(self, *args)",
"def resolve(self, spec):\r\n with ParseContext.temp():\r\n return Pants(spec).resolve()",
"def resolveDefinitions (self, allow_unresolved=False):\n if not self.needsResolution():\n return True\n\n while 0 < len(self.__unresolvedComponents):\n # Save the list of unresolved objects, reset the list to capture\n # any new objects defined during resolution, and attempt the\n # resolution for everything that isn't resolved.\n unresolved = self.__unresolvedComponents\n\n self.__unresolvedComponents = []\n self.__unresolvedDependents = {}\n for resolvable in unresolved:\n # Attempt the resolution.\n resolvable._resolve()\n\n # Either we resolved it, or we queued it to try again later\n assert resolvable.isResolved() or (resolvable in self.__unresolvedComponents), 'Lost resolvable %s' % (resolvable,)\n\n # We only clone things that have scope None. We never\n # resolve things that have scope None. Therefore, we\n # should never have resolved something that has\n # clones.\n if (resolvable.isResolved() and (resolvable._clones() is not None)):\n assert False\n if self.__unresolvedComponents == unresolved:\n if allow_unresolved:\n return False\n # This only happens if we didn't code things right, or the\n # there is a circular dependency in some named component\n # (i.e., the schema designer didn't do things right).\n failed_components = []\n from pyxb.xmlschema import structures\n for d in self.__unresolvedComponents:\n if isinstance(d, structures._NamedComponent_mixin):\n failed_components.append('%s named %s' % (d.__class__.__name__, d.name()))\n else:\n failed_components.append('Anonymous %s' % (d.__class__.__name__,))\n raise pyxb.NotInNamespaceError('Infinite loop in resolution:\\n %s' % (\"\\n \".join(failed_components),))\n\n # Replace the list of unresolved components with None, so that\n # attempts to subsequently add another component fail.\n self.__unresolvedComponents = None\n self.__unresolvedDependents = None\n\n # NOTE: Dependencies may require that we keep these around for a while\n # longer.\n #\n # Remove the namespace context from everything, since we won't be\n # resolving anything else.\n self._releaseNamespaceContexts()\n\n return True",
"def resolve(self):\n for reference in self._references:\n if reference.target is None:\n definition = self._definitions.get(reference.name)\n if definition is None:\n msg = message_factory.get_message(\n 'vapi.data.structref.structure.not.defined',\n reference.name)\n logger.debug(msg)\n raise CoreException(msg)\n reference.target = definition",
"def resolve(self, config: \"Config\") -> bool:\n option = config.getoption(self.argument, default=None)\n if option is not None:\n # A value is set, and it is not the default one\n return self.validate(option)\n\n if os.getenv(self.environment_variable):\n return True\n\n return False",
"def resolve(self):\n raise NotImplementedError",
"def prepare_repositories(self):\n if 'packages' not in self.image:\n return\n\n if self.image.get('packages').get('content_sets'):\n logger.warning('The image has ContentSets repositories specified, all other repositories are removed!')\n self.image['packages']['repositories'] = []\n repos = self.image.get('packages').get('repositories', [])\n\n injected_repos = []\n\n for repo in repos:\n if self._handle_repository(repo):\n injected_repos.append(repo)\n\n if self.image.get('packages').get('content_sets'):\n url = self._prepare_content_sets(self.image.get('packages').get('content_sets'))\n if url:\n repo = Repository({'name': 'content_sets_odcs',\n 'url': {'repository': url}})\n injected_repos.append(repo)\n self._fetch_repos = True\n\n if self._fetch_repos:\n for repo in injected_repos:\n repo.fetch(os.path.join(self.target, 'image', 'repos'))\n self.image['packages']['repositories_injected'] = injected_repos\n else:\n self.image['packages']['set_url'] = injected_repos",
"def resolve(\n self,\n pipeline_info: data_types.PipelineInfo,\n metadata_handler: metadata.Metadata,\n source_channels: Dict[Text, types.Channel],\n ) -> ResolveResult:\n raise NotImplementedError",
"def resolve_variables(self, service, environment, extra_variables=None,\n require_all_replaced=True):\n all_vars = self.load_all_variables(service, environment, extra_variables)\n self.resolved_vars = recursive_replace_vars(\n all_vars, require_all_replaced,\n all_vars[EXCONF_VAR_TEMPLATE_COMMENT_BEGIN],\n all_vars[EXCONF_VAR_STR_TEMPLATE_PREFIX],\n all_vars[EXCONF_VAR_STR_TEMPLATE_SUFFIX])\n return self.resolved_vars",
"def resolve(disco, service, version, environment=\"sandbox\"):\n return disco.resolve(service, version,\n _parseEnvironment(environment)).value().getValue()",
"def resolve(config, interpreter, logger=print):\r\n\r\n setuptools_requirement = failsafe_parse(\r\n 'setuptools==%s' % config.get('python-setup', 'setuptools_version', default='2.2'))\r\n wheel_requirement = failsafe_parse(\r\n 'wheel==%s' % config.get('python-setup', 'wheel_version', default='0.22.0'))\r\n\r\n interpreter = resolve_interpreter(config, interpreter, setuptools_requirement, logger=logger)\r\n if interpreter:\r\n return resolve_interpreter(config, interpreter, wheel_requirement, logger=logger)",
"def resolve_all_refs(s):\n for ref in list_of_all_unpointed_refs():\n ref.resolve()",
"def _handle_repository(self, repo):\n\n logger.debug(\"Loading configuration for repository: '%s' from '%s'.\"\n % (repo['name'],\n 'repositories-%s' % self._type))\n\n if 'id' in repo:\n logger.warning(\"Repository '%s' is defined as plain. It must be available \"\n \"inside the image as Cekit will not inject it.\"\n % repo['name'])\n return False\n\n if 'content_sets' in repo:\n self._fetch_repos = True\n return self._prepare_content_sets(repo)\n\n elif 'rpm' in repo:\n self._prepare_repository_rpm(repo)\n return False\n\n elif 'url' in repo:\n return True\n\n return False",
"def resolve_image(image):\n resolved = image\n if resolved.startswith(\"file:\"):\n return load_image_from_file(resolved[5:])\n if \":\" not in resolved:\n resolved = \"neo4j:\" + image\n if resolved.endswith(\"!\"):\n force = True\n resolved = resolved[:-1]\n else:\n force = False\n if resolved == \"neo4j:snapshot\":\n return pull_snapshot(\"community\", force)\n elif resolved in (\"neo4j:snapshot-enterprise\",\n \"neo4j-enterprise:snapshot\"):\n return pull_snapshot(\"enterprise\", force)\n else:\n return resolved",
"def resolve_git_url(self, grip_git_url:GitUrl) -> None:\n assert self._is_resolved\n if self.git_url.is_leaf():\n self.git_url.make_relative_to(abs_url=grip_git_url)\n pass\n pass",
"def resolve( self, name, version, type, **kwds ):\n if version is None or self.versionless:\n return self._find_dep_default( name, type=type, **kwds )\n else:\n return self._find_dep_versioned( name, version, type=type, **kwds )",
"def resolve( self, aWeb ):\n self.fullName= aWeb.fullNameFor( self.refTo )\n self.chunkList= aWeb.getchunk( self.refTo )"
] | [
"0.54447615",
"0.5355276",
"0.5341811",
"0.52984947",
"0.52957577",
"0.5100457",
"0.4885221",
"0.4863801",
"0.48408106",
"0.47952473",
"0.4756272",
"0.46724224",
"0.46047822",
"0.45708436",
"0.45521674",
"0.45461917",
"0.45224598",
"0.4522276",
"0.45165187",
"0.45096165",
"0.4500106",
"0.44956103",
"0.44926187",
"0.44807675",
"0.4471333",
"0.44696173",
"0.44592395",
"0.44536638",
"0.44479847",
"0.44397074"
] | 0.75666535 | 0 |
Resolve relative (and those using environment variables?) git urls | def resolve_git_url(self, grip_git_url:GitUrl) -> None:
assert self._is_resolved
if self.git_url.is_leaf():
self.git_url.make_relative_to(abs_url=grip_git_url)
pass
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_fix_repo_url():\n repo_url_git = 'git://github.com/Tinche/bower-cache'\n repo_url_https = 'https://github.com/Tinche/bower-cache'\n fixed_url_https = 'https://:@github.com/Tinche/bower-cache'\n assert repo_url_git == gitwrapper._fix_repo_url(repo_url_git)\n assert fixed_url_https == gitwrapper._fix_repo_url(repo_url_https)",
"def _get_git_url_if_present(uri):\n if '#' in uri:\n # Already a URI in git repo format\n return uri\n try:\n from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError\n except ImportError as e:\n print(\"Notice: failed to import Git (the git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\" % e, file=sys.stderr)\n return uri\n try:\n # Check whether this is part of a git repo\n repo = Repo(uri, search_parent_directories=True)\n\n # Repo url\n repo_url = \"file://%s\" % repo.working_tree_dir\n\n # Sub directory\n rlpath = uri.replace(repo.working_tree_dir, '')\n if (rlpath == ''):\n git_path = repo_url\n elif (rlpath[0] == '/'):\n git_path = repo_url + '#' + rlpath[1:]\n else:\n git_path = repo_url + '#' + rlpath\n return git_path\n except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):\n return uri",
"def update_urls(self):\n rel_url_re = re.compile(r'^Relative URL: \\^(.*)/?$')\n if not self._relative_url:\n # noinspection PyPep8\n for line in internals.run(\n f'{self.svn_client} info {self.path}').split('\\n'):\n match = rel_url_re.match(line)\n if match:\n self._relative_url = match.group(1)\n break\n return self._relative_url",
"def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"",
"def resolve_url(url, redirects):\n s = url.find(':')\n if s < 0:\n return url\n scheme, rest = url[:s], url[s+1:]\n if scheme in redirects:\n root = redirects[scheme]\n elif scheme in REPO_ROOTS:\n root = REPO_ROOTS[scheme]\n else:\n return url\n root = root.rstrip('/')\n rest = rest.lstrip('/')\n return '/'.join([root, rest])",
"def parse_ref(url_path):\n ref = url_path.lstrip('/')\n if not ref:\n ref = os.environ.get('DEFAULT_GIT_REF', 'HEAD').strip()\n return ref",
"def resolve_ref(ref):\n if ref == DIRTY:\n return ref\n try:\n return git_rev_parse(ref)\n except CommandFailure:\n for remote in git_remote():\n try:\n return git_rev_parse('{remote}/{ref}'.format(**locals()))\n except CommandFailure:\n continue\n return None",
"def resolve(self, env:GripEnv, resolve_fully:bool=True, error_handler:ErrorHandler=None) -> None:\n self.grip_repo_desc.base.add_log_string(\"Resolve repo '%s' in config '%s'\"%(self.name, self.grip_config.name))\n self.env = GripEnv(name=\"repo %s\"%self.name, parent=env)\n self.env.build_from_values(self.values.env)\n url = self.env.substitute(self.values.url, finalize=True, error_handler=error_handler)\n if url is None:\n raise GripTomlError(\"for repo '%s' has unknown url '%s'\"%(self.name, self.values.url))\n self.url = url\n try:\n self.git_url = GitUrl(self.url)\n pass\n except:\n raise GripTomlError(\"for repo '%s' could not parse git url '%s'\"%(self.name, self.url))\n\n self.branch = self.env.substitute(self.values.branch, finalize=True, error_handler=error_handler)\n self._path = Path(self.git_url.repo_name)\n if self.values.path is not None:\n self._path = Path(self.env.substitute(self.values.path, finalize=True, error_handler=error_handler))\n pass\n\n if self.values.shallow is None:\n self.shallow = False\n pass\n else:\n self.shallow = self.values.shallow\n pass\n\n self.doc = self.values.doc\n\n self.env.add_values({\"GRIP_REPO_PATH\":\"@GRIP_ROOT_PATH@/\"+str(self._path)})\n if resolve_fully:\n self.env.resolve(error_handler=error_handler)\n for (n,s) in self.stages.items():\n s.resolve(self.env, error_handler=error_handler)\n pass\n pass\n # print(\"Resolve %s:%s:%s:%s\"%(self,self.name,self.url,self.git_url))\n self._is_resolved = True\n pass",
"def getProjectURL():",
"def lookup_scm_url(package_location):\n scm_cfg = configparser.ConfigParser()\n if os.path.exists('%s/.git' % package_location):\n scm_cfg.read('%s/.git/config' % package_location)\n if 'remote \"origin\"' in scm_cfg:\n return scm_cfg['remote \"origin\"'].get('url')\n elif os.path.exists('%s/.hg' % package_location):\n scm_cfg.read('%s/.hg/hgrc' % package_location)\n if 'paths' in scm_cfg:\n return scm_cfg['paths'].get('default')",
"def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True",
"def test_repo_relpath(self):\n from os import path\n repodir = \"~/codes/ci/tests\"\n relpath = \"../pyci/config.py\"\n result = path.expanduser(\"~/codes/ci/pyci/config.py\")\n self.assertEqual(result, get_repo_relpath(repodir, relpath))",
"def set_git_url(context, url):\n context.url = url",
"def source_repo_url(branch_url_mode, vcs, source_repo, source_repo_branch):\n return {\n 'short': source_repo_branch,\n 'medium': '{source_repo.strpath}#{source_repo_branch}'.format(**locals()),\n 'long': '{vcs}+{source_repo.strpath}#{source_repo_branch}'.format(**locals())\n }[branch_url_mode]",
"def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )",
"def parse_git_repo(potential_url: str) -> Optional[RepoUrl]:\n return RepoUrl.parse(potential_url)",
"def repositoryPathToURI( path ):\n return \"pbi://secondary/references/%s\" % os.path.basename( path )",
"def git_remote_url(self):\n return self._git_remote_url",
"def test_vcs_url_scheme_to_object(tmpdir):\n git_repo = create_repo_from_pip_url(\n **{\n 'pip_url': 'git+git://git.myproject.org/MyProject.git@da39a3ee5e6b4b',\n 'repo_dir': str(tmpdir.join('myproject1')),\n }\n )\n\n # TODO cwd and name if duplicated should give an error\n\n assert isinstance(git_repo, GitRepo)\n assert isinstance(git_repo, BaseRepo)\n\n hg_repo = create_repo_from_pip_url(\n **{\n 'pip_url': 'hg+https://hg.myproject.org/MyProject#egg=MyProject',\n 'repo_dir': str(tmpdir.join('myproject2')),\n }\n )\n\n assert isinstance(hg_repo, MercurialRepo)\n assert isinstance(hg_repo, BaseRepo)\n\n svn_repo = create_repo_from_pip_url(\n **{\n 'pip_url': 'svn+svn://svn.myproject.org/svn/MyProject#egg=MyProject',\n 'repo_dir': str(tmpdir.join('myproject3')),\n }\n )\n\n assert isinstance(svn_repo, SubversionRepo)\n assert isinstance(svn_repo, BaseRepo)",
"def parse_resolve(cls, url):\n loc = cls.parse(url)\n if loc.path and loc.path != '/':\n # If true ref name contains slash, a prefix of path might be a suffix of\n # ref. Try to resolve it.\n ref_prefix = None\n if loc.treeish.startswith('refs/'):\n ref_prefix = loc.treeish + '/'\n refs = get_refs(loc.hostname, loc.project, ref_prefix)\n if not refs:\n raise TreeishResolutionError('could not resolve treeish in %s' % url)\n\n treeishes = set(refs.keys())\n # Add branches and tags without a prefix.\n for ref in refs:\n for prefix in ('refs/tags/', 'refs/heads/'):\n if ref.startswith(prefix):\n treeishes.add(ref[len(prefix):])\n break\n loc = cls.parse(url, treeishes=treeishes)\n return loc",
"def repo_value(url):\n if url == '^':\n return url\n tup = urlsplit(url)\n if tup.scheme or tup.netloc:\n return urlunsplit(tup[:3]+('', ''))\n raise ValueError('URL %(url)r doesn\\'t contain a scheme '\n 'nor a hostname'\n % locals())",
"def _git_path(request, wiki):\n\n path = request.path.split(u'/{0}/'.format(wiki))[1]\n\n # Remove slashes\n while path and path[0] == u'/':\n path = path[1:]\n\n while path and path[-1] == u'/':\n path = path[:-1]\n\n return path",
"def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]",
"def get_url(self):\n if self.url:\n return self.url\n # if we have a uuid and happen to know the URL for it, use that\n elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):\n self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n return self.url\n # if we've only seen one rep, use that (a guess, but an educated one)\n elif not self.uuid and len(PathIdentifier.repo_hints) == 1:\n uuid, root = PathIdentifier.repo_hints.items()[0]\n if uuid:\n self.uuid = uuid\n PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self\n self.url = root + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n report(\"Guessing that '%s' refers to '%s'\" % (self, self.url))\n return self.url\n else:\n error(\"Cannot determine URL for '%s'; \" % self +\n \"Explicit source argument (-S/--source) required.\\n\")",
"def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"",
"def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]",
"def resolve_config_path(config: Mapping[str, Any], path: str) -> str:\n if is_absolute_path(path):\n abs_path = path\n else:\n base_dir = get_base_dir(config)\n abs_path = f\"{base_dir}/{path}\"\n # Resolve \"..\" and \".\" in path\n if \"://\" in abs_path:\n scheme, host_path = abs_path.split(\"://\", maxsplit=1)\n if \"/\" in host_path:\n hostname, url_path = host_path.split(\"/\", maxsplit=1)\n url_path = _remove_path_dot_segments(url_path)\n return f\"{scheme}://{hostname}/{url_path}\"\n else:\n return f\"{scheme}://{host_path}\"\n else:\n if os.name == \"nt\":\n # Windows can also live with forward slashes\n abs_path = abs_path.replace(\"\\\\\", \"/\")\n return _remove_path_dot_segments(abs_path)",
"def linkcode_resolve(domain, info):\n if domain != 'py' or not info['module']:\n return None\n filename = info['module'].replace('.', '/')\n return \"https://github.com/mathcamp/flywheel/blob/%s/%s.py\" % (version_data['ref'], filename)",
"def _get_unfurl_requirement_url(spec):\n if not spec:\n return spec\n if \"egg=unfurl\" in spec:\n # looks fully specified, just return it\n return spec\n\n url, sep, ref = spec.rpartition(\"@\")\n if sep:\n if ref:\n ref = \"@\" + ref\n else:\n ref = \"@\" + __version__()\n\n if not url:\n return \"git+https://github.com/onecommons/unfurl.git\" + ref + \"#egg=unfurl\"\n if not url.startswith(\"git+\"):\n return \"git+file://\" + os.path.abspath(url) + ref + \"#egg=unfurl\"\n else:\n return url + ref + \"#egg=unfurl\"",
"def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))"
] | [
"0.6905865",
"0.6465272",
"0.63995284",
"0.63346756",
"0.63076055",
"0.6199112",
"0.61906636",
"0.6154257",
"0.61487776",
"0.61288077",
"0.61084867",
"0.609824",
"0.6035324",
"0.6018412",
"0.6003314",
"0.5865326",
"0.583461",
"0.5826739",
"0.5811917",
"0.5784114",
"0.57677877",
"0.576303",
"0.5745155",
"0.5745122",
"0.5739425",
"0.57229644",
"0.5714985",
"0.569357",
"0.5671451",
"0.56551"
] | 0.6919627 | 0 |
password The password. iterations The number of iterations of PBKDF2 (default=100000). returns a key | def make_key(password, iterations=ITERATIONS):
key = PBKDF2(password, SALT, dkLen=KEY_LENGTH_BYTES, count=iterations)
return key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pbkdf2(password, salt, iterations, dklen=0, digest=None):\n if digest is None:\n digest = hashlib.sha256\n dklen = dklen or None\n password = force_bytes(password)\n salt = force_bytes(salt)\n return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)",
"def pbkdf2(password, salt, iterations, dklen=0, digest=None):\n if digest is None:\n digest = settings.CRYPTOGRAPHY_DIGEST\n if not dklen:\n dklen = digest.digest_size\n password = force_bytes(password)\n salt = force_bytes(salt)\n kdf = PBKDF2HMAC(\n algorithm=digest,\n length=dklen,\n salt=salt,\n iterations=iterations,\n backend=settings.CRYPTOGRAPHY_BACKEND)\n return kdf.derive(password)",
"def derive_fernet_key(password, salt):\n kdf = pbkdf2.PBKDF2HMAC(\n algorithm=hashes.SHA256(),\n length=32,\n salt=encoding.force_bytes(salt),\n iterations=100000,\n backend=default_backend()\n )\n return base64.urlsafe_b64encode(kdf.derive(\n encoding.force_bytes(password)))",
"def pbkdf2(password, salt, iter_count, dk_len=None, digest=hashlib.sha512):\n h_len = digest().digest_size\n\n if dk_len is None:\n dk_len = h_len\n\n if dk_len > ((2**32) - 1) * h_len:\n raise Exception(\"pbkdf2: derived key too long\")\n\n l = int(math.ceil(float(dk_len) / h_len))\n r = dk_len - (l - 1) * h_len\n\n unhex_fmt = \"{{0:0{0}x}}\".format(h_len*2)\n\n def F(i):\n u = hmac.new(password, salt + struct.pack(\">I\", i),\n digestmod=digest).digest()\n F_result = big_int(binascii.hexlify(u), 16)\n for _ in range(1, iter_count):\n u = hmac.new(password, u, digestmod=digest).digest()\n # Performance improvement using only one XOR with Python's long int\n F_result ^= big_int(binascii.hexlify(u), 16)\n\n return binascii.unhexlify(unhex_fmt.format(F_result).encode())\n\n Ts = (F(i) for i in range(1, l))\n return b''.join(Ts) + F(l)[:r]",
"def create_password_hash(self, password):\n return pbkdf2_sha256.encrypt(password, rounds=1000, salt_size=16)",
"def generate_key_from_password(pwd, salt=None):\n # https://cryptography.io/en/latest/fernet/#using-passwords-with-fernet\n password = base64.urlsafe_b64encode(pwd.encode())\n if salt is None:\n salt = os.urandom(16)\n kdf = PBKDF2HMAC(\n algorithm=hashes.SHA256(),\n length=32,\n salt=salt,\n iterations=settings.HASH_ITERATIONS,\n backend=default_backend()\n )\n key = base64.urlsafe_b64encode(kdf.derive(password))\n return key",
"def hashPassword(self, password):\n key = hashlib.pbkdf2_hmac(\n 'sha256',\n str.encode(password),\n self.salt,\n 100000\n )\n return key",
"def password_to_key(password: str):\r\n curve = ec.SECP256R1() # Elliptic curve\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(password.encode())\r\n password_int = int.from_bytes(digest.finalize(), \"big\")\r\n return ec.derive_private_key(password_int, curve)",
"def get_key(name):\n import os\n salt = os.urandom(16)\n name = name.encode()\n from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.backends import default_backend\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),\n length=32,\n salt=salt,\n iterations=100000,\n backend=default_backend())\n import base64\n key = base64.urlsafe_b64encode(kdf.derive(name))\n return key",
"def pbkdf2_bin(\n data, salt, iterations=1000, keylen=24, hashfunc=None, encoding='utf-8'):\n hashfunc = hashfunc or hashlib.sha1\n if can_encode(data):\n data = data.encode(encoding)\n if can_encode(salt):\n salt = salt.encode(encoding)\n mac = hmac.new(data, None, hashfunc)\n blocks_len = -(-keylen // mac.digest_size) + 1\n return _bin(mac, salt, blocks_len, iterations)[:keylen]",
"def generate_secret_key():\n return b64encode(Fernet.generate_key()).decode('utf-8')",
"def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):\n if not isinstance(hash_name, str):\n raise TypeError(hash_name)\n\n if not isinstance(password, (bytes, bytearray)):\n password = bytes(buffer(password))\n if not isinstance(salt, (bytes, bytearray)):\n salt = bytes(buffer(salt))\n\n # Fast inline HMAC implementation\n inner = hashlib.new(hash_name)\n outer = hashlib.new(hash_name)\n blocksize = getattr(inner, 'block_size', 64)\n if len(password) > blocksize:\n password = hashlib.new(hash_name, password).digest()\n password = password + b'\\x00' * (blocksize - len(password))\n inner.update(password.translate(_trans_36))\n outer.update(password.translate(_trans_5C))\n\n def prf(msg, inner=inner, outer=outer):\n # PBKDF2_HMAC uses the password as key. We can re-use the same\n # digest objects and just update copies to skip initialization.\n icpy = inner.copy()\n ocpy = outer.copy()\n icpy.update(msg)\n ocpy.update(icpy.digest())\n return ocpy.digest()\n\n if iterations < 1:\n raise ValueError(iterations)\n if dklen is None:\n dklen = outer.digest_size\n if dklen < 1:\n raise ValueError(dklen)\n\n hex_format_string = \"%%0%ix\" % (hashlib.new(hash_name).digest_size * 2)\n\n dkey = b''\n loop = 1\n while len(dkey) < dklen:\n prev = prf(salt + struct.pack(b'>I', loop))\n rkey = int(binascii.hexlify(prev), 16)\n for i in xrange(iterations - 1):\n prev = prf(prev)\n rkey ^= int(binascii.hexlify(prev), 16)\n loop += 1\n dkey += binascii.unhexlify(hex_format_string % rkey)\n\n return dkey[:dklen]",
"def generate_hash(password):\n return pbkdf2_sha256.hash(password)",
"def generate_symmetric_key():\n return Fernet.generate_key()",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)",
"def password_encryption(self, password):\n return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())",
"def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )",
"def get_key(data):\n decrypt_key = pow(int(data), d, n)\n return decrypt_key",
"def generate_key():\n return get_random_bytes(KEY_SIZE)",
"def hash_new_password(password: str) -> Tuple[bytes, bytes]:\r\n salt = os.urandom(16)\r\n pw_hash = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100000)\r\n return salt, pw_hash",
"def __hash_new_password(password: str) -> Tuple[bytes, bytes]:\n salt = os.urandom(16)\n pw_hash = hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt, 100000)\n return salt, pw_hash",
"def encrypt_password(cls, password):\n return generate_password_hash(password)",
"def hash_pass(password, salt):\n return hashlib.pbkdf2_hmac('sha512', password.encode(), salt, 100000)",
"def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))",
"def reveal_seed():\n password = getpass.getpass('Password from keystore: ') # Prompt the user for a password of keystore file\n\n configuration = Configuration().load_configuration()\n api = get_api()\n\n try:\n wallet = api.get_private_key(configuration, password)\n click.echo('Account prv key: %s' % str(wallet.get_private_key().hex()))\n\n except InvalidPasswordException:\n click.echo('Incorrect password!')",
"def _produce_key(self, passphrase):\n from hashlib import sha256\n pp = bytes(passphrase, 'utf-8')\n hash_alg = sha256(pp)\n for i in range(self._get_key_stretches()):\n d = hash_alg.digest()\n hash_alg.update(d + pp)\n return hash_alg.digest()",
"def hash_new_password(password: str) -> Tuple[bytes, bytes]:\n salt = os.urandom(16)\n pw_hash = hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt, 100000)\n return salt, pw_hash",
"def scrypt(salt: bytes, N: int, password: bytes) -> bytes:\n kdf = Scrypt(salt=salt, length=32, n=N, r=8, p=1, backend=default_backend())\n return kdf.derive(password)",
"def private_key(self, seed: str) -> str:\n return nanopy.deterministic_key(seed, self.account_index)[0]",
"def passphrase(password):\n\tnow = int(time.time())\n\tkey = sha256(password).hexdigest()\n\tphrase = sha256(str(now)+key).hexdigest()\n\n\treturn (now, phrase)"
] | [
"0.7508836",
"0.7408228",
"0.72142005",
"0.6800081",
"0.67509955",
"0.67296535",
"0.669605",
"0.6585402",
"0.6559936",
"0.6557969",
"0.6481103",
"0.6474474",
"0.6468353",
"0.6362947",
"0.6215115",
"0.6179237",
"0.6177522",
"0.6165302",
"0.6124966",
"0.608187",
"0.6069272",
"0.6069011",
"0.60611105",
"0.60537124",
"0.60452706",
"0.60432565",
"0.6035527",
"0.6019373",
"0.60074294",
"0.60073227"
] | 0.8685448 | 0 |
Creates an HMAC from the given message, using the given key. Uses HMACMD5. message The message to create an HMAC of. key The key to use for the HMAC (at least 16 bytes). returns A hex string of the HMAC. | def make_hmac(message, key):
h = HMAC.new(key)
h.update(message)
return h.hexdigest() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_hmac(self, msg):\r\n return hmac.new(self.hmacKey, msg, sha256).digest()",
"def get_hmac(secret, message):\n return hmac.new(\n secret, message.encode('ascii'), digestmod=hashlib.sha256\n ).hexdigest()",
"def _hmac(self, key, msg):\n return hmac.new(key, msg, digestmod=self.hashfunc).digest()",
"def getHMAC(key, value):\n h = hmac.new(key, value, digestmod=DIGESTMOD)\n return h.digest()",
"def monthly_signature(key, message):\n byte_key = binascii.unhexlify(key)\n message = message.encode()\n return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()",
"def create_hmac(mac_pass, msg_bytes):\n return hmac.new(\n mac_pass, msg_bytes, digestmod=hashlib.sha256).digest()",
"def hash_hmac( self, msg ):\n result = hmac.new( self.secret, msg, hashlib.sha512 )\n return result.hexdigest()",
"def _get_hex_digest(cls, message, secret):\n hmac_digester = hmac.new(secret.encode('utf-8'), message.encode('utf-8'), digestmod='sha512')\n return hmac_digester.hexdigest()",
"def daily_signature(key, message):\n byte_key = binascii.unhexlify(key)\n message = message.encode()\n return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()",
"def CBCMACbasedOnAES(message, key):\n\n # Convert the message into bytes\n message1 = bytes(message)\n # Convert the key into bytes\n key1 = bytes(key)\n\n # Create the AES object\n aes_obj = AES.new(key1, AES.MODE_CBC, iv)\n # Encrypt the message\n MAC = aes_obj.encrypt(message1)\n # Return the MAC of the message\n return MAC",
"def _hmac_sha256(key, msg):\n\n return hmac.new(key, msg, hashlib.sha256).digest()",
"def pack_message(msg, hmac_key=None):\n end_fmt = MESSAGE_END_FMT_HMAC if hmac_key else MESSAGE_END_FMT\n # Create full message excluding CRC and suffix\n buffer = (\n struct.pack(\n MESSAGE_HEADER_FMT,\n PREFIX_VALUE,\n msg.seqno,\n msg.cmd,\n len(msg.payload) + struct.calcsize(end_fmt),\n )\n + msg.payload\n )\n if hmac_key:\n crc = hmac.new(hmac_key, buffer, sha256).digest()\n else:\n crc = binascii.crc32(buffer) & 0xFFFFFFFF\n # Calculate CRC, add it together with suffix\n buffer += struct.pack(end_fmt, crc, SUFFIX_VALUE)\n return buffer",
"def _hmac_create(self, password, shared_key):\n hmac_value = base64.b64encode(hmac.new(\n smart_str(shared_key),\n smart_str(password),\n hashlib.sha512).digest())\n return hmac_value",
"def _hmac_create(self, password, shared_key):\n hmac_value = base64.b64encode(hmac.new(\n smart_str(shared_key),\n smart_str(password),\n hashlib.sha512).digest())\n return hmac_value",
"def getHMACFunc(key, hex=True):\n h = hmac.new(key, digestmod=DIGESTMOD)\n def hmac_fn(value):\n h_tmp = h.copy()\n h_tmp.update(value)\n if hex:\n return h_tmp.hexdigest()\n else:\n return h_tmp.digest()\n return hmac_fn",
"def _encode_message(message):\n aes_key = get_settings()['aes_key'].encode('utf-8')\n hmac_key = get_settings()['hmac_key'].encode('utf-8')\n\n pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(\n AES.block_size - len(s) % AES.block_size)\n init_vector = Random.new().read(AES.block_size)\n cipher = AES.new(aes_key, AES.MODE_CBC, init_vector)\n padded_message = pad(message)\n aes_message = init_vector + cipher.encrypt(padded_message)\n hmac_digest = hmac.new(bytes(hmac_key), bytes(aes_message), hashlib.sha1)\n\n return aes_message, hmac_digest",
"def create_hmac(self, secret: str, input: float) -> str:\n\n input_str = repr(input).encode(\"ascii\")\n input_hash = hashlib.sha1(secret + input_str).hexdigest().encode(\"ascii\")\n return hashlib.sha1(secret + input_hash).hexdigest()",
"def hmac(self, key: bytes, data: bytes, algorithm: str = 'sha256') -> bytes:\n hmac_obj = hmac.HMAC(key, self._get_algorithm(algorithm), default_backend())\n hmac_obj.update(data)\n return hmac_obj.finalize()",
"def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()",
"def _calculate_hmac(self, base_string, key):\n hmacs = dict()\n # --- MD5 ---\n hashed = hmac.new(key, base_string, hashlib.md5)\n hmac_md5 = hashed.digest().encode('base64').rstrip('\\n')\n hmacs['MD5'] = hmac_md5\n # --- SHA-1 ---\n hashed = hmac.new(key, base_string, hashlib.sha1)\n hmac_sha1 = hashed.digest().encode('base64').rstrip('\\n')\n hmacs['SHA-1'] = hmac_sha1\n # --- SHA-224 ---\n hashed = hmac.new(key, base_string, hashlib.sha224)\n hmac_sha224 = hashed.digest().encode('base64').rstrip('\\n')\n hmacs['SHA-224'] = hmac_sha224\n # --- SHA-256 ---\n hashed = hmac.new(key, base_string, hashlib.sha256)\n hmac_sha256 = hashed.digest().encode('base64').rstrip('\\n')\n hmacs['SHA-256'] = hmac_sha256\n # --- SHA-384 ---\n hashed = hmac.new(key, base_string, hashlib.sha384)\n hmac_sha384 = hashed.digest().encode('base64').rstrip('\\n')\n hmacs['SHA-384'] = hmac_sha384\n # --- SHA-512 ---\n hashed = hmac.new(key, base_string, hashlib.sha512)\n hmac_sha512 = hashed.digest().encode('base64').rstrip('\\n')\n hmacs['SHA-512'] = hmac_sha512\n return hmacs",
"def buildHmacSha256AuthHeader(mac_key_id,mac_key,method,resource,hostname,port):\n debugMain('HMAC SHA 256')\n debugDetail('mac key id: %s'%repr(mac_key_id))\n debugDetail('mac key: %s'%repr(mac_key))\n\n timestamp = int(time.time())\n nonce = randomString()\n\n msg = '\\n'.join([str(timestamp), nonce, method, resource, hostname, str(port), '', ''])\n debugDetail('input to hash: '+repr(msg))\n debugRaw(msg)\n \n digest = hmac.new(removeUnicode(mac_key),removeUnicode(msg),hashlib.sha256).digest()\n mac = removeUnicode(b64encode(digest).decode()) # this produces unicode for some reason\n authHeader = 'MAC id=\"%s\" ts=\"%s\" nonce=\"%s\" mac=\"%s\"'%(removeUnicode(mac_key_id), timestamp, nonce, mac)\n debugDetail('auth header:')\n debugRaw(authHeader)\n return authHeader",
"def CreateHMACKey(self):\n if self._HMAC_KEY == -1:\n self._HMAC_KEY = ''.join(\n [chr(random.randint(48, 122)) for i in range(20)])\n self._HMAC_KEY = bytes(self._HMAC_KEY, encoding='utf-8')\n return self._HMAC_KEY.decode('utf-8')",
"def compute_signature(msg):\n hashkey = memcache.Client().get('CURL_TEST_SERVER_HASHKEY')\n h = hmac.new(hashkey, msg, hashlib.sha1)\n signature = urllib.quote(base64.b64encode(h.digest()))\n return signature",
"def hmac(key, data, algorithm):\n if algorithm == CryptographicMeta.SHA1:\n algorithm = hashlib.sha1\n else:\n raise NotImplementedError\n return hmac.new(key, data, algorithm).digest()",
"def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)",
"def _generate_signature(self, key, msg):\n key = to_bytes(key)\n msg = to_bytes(msg)\n\n hash_obj = hmac.new(key, msg=msg, digestmod=hashlib.sha256)\n digest = hash_obj.digest() # abstract\n\n signature = base64.b64encode(digest) # Signature\n return to_unicode(signature)",
"def make_signature(secret: VersionedSecret, message: str, max_age: datetime.timedelta) -> bytes:\n version = 1\n expiration = int(time.time() + max_age.total_seconds())\n header = _HEADER_FORMAT.pack(version, expiration)\n digest = _compute_digest(secret.current, header, message)\n return base64.urlsafe_b64encode(header + digest)",
"def compute_header_hmac_hash(context):\n\n return hmac.new(\n hashlib.sha512(\n b'\\xff' * 8 +\n hashlib.sha512(\n context.header.value.dynamic_header.master_seed.data +\n context.transformed_key +\n b'\\x01'\n ).digest()\n ).digest(),\n context.header.data,\n hashlib.sha256\n ).digest()",
"def _MakeEmsaMessageSha256(self, msg, modulus_size, logf=None):\r\n magic_sha256_header = [0x30, 0x31, 0x30, 0xd, 0x6, 0x9, 0x60, 0x86, 0x48,\r\n 0x1, 0x65, 0x3, 0x4, 0x2, 0x1, 0x5, 0x0, 0x4, 0x20]\r\n\r\n hash_of_msg = hashlib.sha256(msg).digest() #???\r\n\r\n self._Log(logf, 'sha256 digest of msg %s: [%s]' % (msg, hash_of_msg.encode('hex')))\r\n\r\n encoded = ''.join([chr(c) for c in magic_sha256_header]) + hash_of_msg\r\n\r\n msg_size_bits = modulus_size + 8-(modulus_size % 8) # Round up to next byte\r\n\r\n pad_string = chr(0xFF) * (msg_size_bits / 8 - len(encoded) - 3)\r\n return chr(0) + chr(1) + pad_string + chr(0) + encoded",
"def Generate(size=keyinfo.HMAC_SHA1.default_size):\n key_bytes = util.RandBytes(size / 8)\n key_string = util.Encode(key_bytes)\n return HmacKey(key_string, size)"
] | [
"0.7477667",
"0.72997296",
"0.71323496",
"0.6831234",
"0.6760007",
"0.6723533",
"0.67172796",
"0.66682434",
"0.66546047",
"0.65924144",
"0.6565024",
"0.64207625",
"0.6401138",
"0.6389563",
"0.6375475",
"0.63350046",
"0.6037575",
"0.60342425",
"0.5973662",
"0.59505445",
"0.593986",
"0.5882473",
"0.5866859",
"0.58491784",
"0.58319736",
"0.580288",
"0.568288",
"0.5657133",
"0.5654333",
"0.5641124"
] | 0.8378977 | 0 |
Decrypts a given ciphertext with the given key, using AESCFB. message The ciphertext to decrypt (byte string). key The AES key (16 bytes). iv The original IV used for encryption. returns The cleartext (byte string) | def decrypt(ciphertext, key, iv):
cipher = AES.new(key, AES.MODE_CFB, iv)
msg = cipher.decrypt(ciphertext)
return msg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decrypt_cbc(key, ciphertext):\n\tmessage = ''\n\tfor i in range(0, len(ciphertext)/16 - 1):\n\t\tiv = ciphertext[i*16:(i+1)*16]\n\t\tinputblock = ciphertext[(i+1)*16:(i+2)*16]\n\t\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\t\tmessage +=cipher.decrypt(inputblock)\n\tif ord(message[-1]) <=16:\n\t\tmessage = message[:-ord(message[-1])]\n\treturn message",
"def decrypt_ctr(key, ciphertext):\n\tmessage = ''\n\tiv = ciphertext[0:16]\n\tfor i in range(16, len(ciphertext), 16):\n\t\tinputblock = ciphertext[i:i+16]\n\t\tcipher = AES.new(key, AES.MODE_ECB)\n\t\txorkey = cipher.encrypt(long_to_bytes(bytes_to_long(iv)+(i/16-1)))\n\t\tif len(inputblock) == 16:\n\t\t\tmessage += strxor(inputblock, xorkey)\n\t\telse:\n\t\t\tmessage += strxor(inputblock, xorkey[:len(inputblock)])\n\treturn message",
"def decrypt(ciphertext, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\n\tif not isPython2():\n\t\tif isString(ciphertext):\n\t\t\tciphertext = ciphertext.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\t\t\n\tiv = ciphertext[:AES.block_size]\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tplaintext = cipher.decrypt(ciphertext[AES.block_size:])\n\treturn plaintext",
"def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))",
"def decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)",
"def cbc_decrypt(encrypted, key, iv):\n aes = AES.new(key, AES.MODE_CBC, iv)\n return strip_padding(aes.decrypt(base64.b64decode(encrypted)).decode())",
"def decrypt(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv=iv)\n return decryptor.decrypt(data)",
"def decrypt( raw, key, iv ):\n result = ''\n tmp_iv = iv \n ciphertext = pad(raw)\n\n for i in xrange(0, len(ciphertext) / BS):\n lower_bound = i * 16\n upper_bound = (i+1) * 16\n \n tmp = AES.new(key, AES.MODE_OFB, tmp_iv).decrypt( ciphertext[lower_bound:upper_bound] )\n tmp_iv = ciphertext[lower_bound:upper_bound]\n result += tmp\n\n return result",
"def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)",
"def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')",
"def decrypt(ciphertext, key, iv, tag, associated_data=''):\n\n decryptor = Cipher(\n algorithms.AES(key), modes.GCM(iv, tag),\n backend=default_backend()).decryptor()\n\n decryptor.authenticate_additional_data(associated_data)\n\n return decryptor.update(ciphertext) + decryptor.finalize()",
"def decrypt_ctr(self, ciphertext, iv):\n assert len(iv) == 16\n\n blocks = []\n nonce = iv\n for ciphertext_block in split_blocks(ciphertext):\n # CTR mode decrypt: ciphertext XOR decrypt(nonce)\n block = xor_bytes(ciphertext_block, self.decrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return unpad(b''.join(blocks))",
"def decrypt(self, message):\n message = base64.b64decode(message)\n initialization_vector = message[:self._block_size]\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n raw_message = cipher.decrypt(message[self._block_size:])\n return self._remove_padding(raw_message).decode('utf-8')",
"def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))",
"def decryptAESCTR(key, iv, ciphertext):\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n decryptor = cipher.decryptor()\n return decryptor.update(ciphertext) + decryptor.finalize()",
"def decrypt_message(K, iv, ciphertext, tag):\n aes = Cipher(\"aes-128-gcm\")\n plain = aes.quick_gcm_dec(K, iv, ciphertext, tag)\n \n \n return plain.encode(\"utf8\")",
"def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted",
"def decrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.decrypt(message)",
"def decrypt(self, message):\n #check validity of _private_key\n if self._private_key is None:\n raise Exception(\"invalid private key\")\n\n output = \"\"\n\n d = self._private_key[0]\n n = self._private_key[1]\n\n for i in xrange(len(ciphertext)):\n m = pow(ciphertext[i], d, n)\n output += int_to_string(m)\n return output",
"def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")",
"def decrypt(data, key, iv, save_path=None):\n if isinstance(data, str):\n with open(data, 'rb') as f:\n data = f.read()\n pad_ch = '\\0'\n length = int(data[:16].rstrip(pad_ch.encode('utf-8')).decode('utf-8'))\n data = data[16:]\n key = _pad16(key)\n iv = _pad16(iv)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.decrypt(data)\n data = data[:length]\n if save_path:\n with open(save_path, 'wb') as f:\n f.write(data)\n return data",
"def decrypt_aes256(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv)\n return decryptor.decrypt(data)",
"def decrypt(ciphertext):\n base_decode = {'16': base64.b16decode,\n '32': base64.b32decode, '64': base64.b64decode}\n cleartext = ciphertext+''\n for i in range(encrypt_times):\n cleartext = base_decode[get_base(cleartext)](cleartext)\n return cleartext",
"def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)",
"def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)",
"def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)",
"def decrypt_msg(msg, query, padding, iv=None, blocksize=16, threads=1):\n # Input validation\n msg = bytearray(msg)\n assert len(msg) % blocksize == 0\n if iv is not None:\n iv = bytearray(iv)\n assert len(iv) == blocksize\n msg = iv + msg\n else:\n assert len(msg) > blocksize\n\n # Split into \"iv\", ciphertext pairs\n blocks = chop(bytearray(msg), blocksize)\n pairs = zip(blocks, blocks[1:])\n\n # Decrypt every pair seperately (to minimize query size)\n logger.info('Decrypting %d block[s] of data using a padding oracle' % len(pairs))\n out = bytearray()\n for n, (iv, block) in enumerate(pairs):\n logger.info('Decrypting block %d' % n)\n out += decrypt(iv, block, query, padding, threads)\n logger.info('Decrypted block: %s' % hex(out[-blocksize:]))\n return out",
"def decrypt_message(encrypted_message):",
"def decrypt(self, key, msg, b64decode=True):\n if b64decode:\n msg = base64.b64decode(msg)\n iv = msg[:self.cipher.block_size]\n cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)\n\n padded = cipher.decrypt(msg[self.cipher.block_size:])\n l = ord(padded[-1:]) + 1\n plain = padded[:-l]\n return plain",
"def decrypt_message(message: bytes, receiver_private_key: RsaKey) -> bytes:\n iv = message[:IV_LEN]\n enc_aes_key = message[IV_LEN:IV_LEN + receiver_private_key.size_in_bytes()] # Assume encryption has been done with same key size\n enc_message = message[IV_LEN + receiver_private_key.size_in_bytes():]\n\n cipher_rsa = PKCS1_OAEP.new(receiver_private_key)\n aes_key = cipher_rsa.decrypt(enc_aes_key)\n\n cipher_aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(cipher_aes.decrypt(enc_message), AES.block_size) # Padding have to be removed"
] | [
"0.77688974",
"0.7527659",
"0.748",
"0.74041873",
"0.7396227",
"0.7378443",
"0.7366361",
"0.7350202",
"0.7348135",
"0.7346627",
"0.723203",
"0.71131873",
"0.7042764",
"0.70285034",
"0.69840527",
"0.6957698",
"0.69559014",
"0.68933415",
"0.68483377",
"0.68203044",
"0.6804833",
"0.6774077",
"0.6772038",
"0.6770311",
"0.6756726",
"0.6721904",
"0.66755146",
"0.66646117",
"0.66583496",
"0.66280806"
] | 0.87284034 | 0 |
Creates a preprocessing graph for a batch given a function that processes a single image. | def preprocess_batch(images_batch, preproc_func=None):
if preproc_func is None:
return images_batch
with tf.variable_scope('preprocess'):
images_list = tf.split(images_batch, int(images_batch.shape[0]))
result_list = []
for img in images_list:
reshaped_img = tf.reshape(img, img.shape[1:])
processed_img = preproc_func(reshaped_img)
result_list.append(tf.expand_dims(processed_img, axis=0))
result_images = tf.concat(result_list, axis=0)
return result_images | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_tracking_preprocessing(input_shape):\n\n def preprocessing(input_img, **kwargs):\n\n to_normalize = False if np.percentile(input_img, 98) > 1.0 else True\n\n if len(input_img.shape) == 4:\n print(\n \"Only preprocessing single image, we will consider the first one of the batch\"\n )\n image = input_img[0] * 255.0 if to_normalize else input_img[0] * 1.0\n else:\n image = input_img * 255.0 if to_normalize else input_img * 1.0\n\n image = cv2.resize(image, input_shape)\n x, _ = transform_test(mx.nd.array(image), min(input_shape))\n return x\n\n return preprocessing",
"def get_preprocess_fn(**preprocessing_kwargs):\n\n def _preprocess_fn(data):\n \"\"\"The preprocessing function that is returned.\"\"\"\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data\n\n return _preprocess_fn",
"def _trace_preprocessing_fn_v1(preprocessing_fn, specs):\n with tf.compat.v1.Graph().as_default() as graph:\n with tf.compat.v1.name_scope('inputs'):\n structured_inputs = batched_placeholders_from_specs(specs)\n # In order to avoid a bug where import_graph_def fails when the\n # input_map and return_elements of an imported graph are the same\n # (b/34288791), we avoid using the placeholder of an input column as an\n # output of a graph. We do this by applying tf.identity to all inputs of\n # the preprocessing_fn. Note this applies at the level of raw tensors.\n # TODO(b/34288791): Remove this workaround and use a shallow copy of\n # inputs instead. A shallow copy is needed in case\n # self._preprocessing_fn mutates its input.\n copied_inputs = tf_utils.copy_tensors(structured_inputs)\n\n structured_outputs = preprocessing_fn(copied_inputs)\n return graph, structured_inputs, structured_outputs",
"def map_fn(image, label):\n if is_training and train_mode == 'pretrain':\n xs = []\n for _ in range(2): # Two transformations\n xs.append(preprocess_fn_pretrain(image))\n image = tf.concat(xs, -1)\n else:\n image = preprocess_fn_finetune(image)\n label = tf.one_hot(label, num_classes)\n return image, label",
"def map_fn(image, label):\n if is_training and FLAGS.train_mode == 'pretrain':\n xs = []\n for _ in range(2): # Two transformations\n xs.append(preprocess_fn_pretrain(image))\n image = tf.concat(xs, -1)\n else:\n image = preprocess_fn_finetune(image)\n label = tf.one_hot(label, num_classes)\n return image, label",
"def preprocess_graph(self):\n image = tf.placeholder(\n tf.float32,\n shape=[self.img_h, self.img_w, self.col_channels])\n patches = self.create_patches(image)\n return {'image': image,\n 'patches': patches}",
"def get_preprocess_fn(is_training, is_pretrain):\n # Disable test cropping for small images (e.g. CIFAR)\n if FLAGS.image_size <= 32:\n test_crop = False\n else:\n test_crop = True\n color_jitter_strength = FLAGS.color_jitter_strength if is_pretrain else 0.\n return functools.partial(\n data_util.preprocess_image,\n height=FLAGS.image_size,\n width=FLAGS.image_size,\n is_training=is_training,\n color_jitter_strength=color_jitter_strength,\n test_crop=test_crop)",
"def _make_process_op(self):\n\n with tf.variable_scope(\"state_preprocess\"):\n self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)\n output = tf.image.rgb_to_grayscale(self.input_state)\n output = tf.image.crop_to_bounding_box(output, 34, 0, 160, 160)\n output = tf.image.resize_images(output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n output = tf.to_float(output) / 255.0\n output = tf.transpose(output, perm=[2, 1, 0])\n\n return output",
"def _create_preprocess_fn(\n num_epochs: int,\n batch_size: int,\n merge_case: bool,\n shuffle_buffer_size: int = emnist_dataset.MAX_CLIENT_DATASET_SIZE,\n use_cache: bool = True,\n use_prefetch: bool = True,\n) -> Callable[[tf.data.Dataset], tf.data.Dataset]:\n @tf.function\n def merge_mapping(elem):\n original_label_to_merged_label = tf.constant([\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,\n 12, 38, 39, 40, 41, 42, 18, 19, 20, 21, 22, 43, 24, 25, 44, 45, 28, 46,\n 30, 31, 32, 33, 34, 35\n ])\n return collections.OrderedDict(\n label=original_label_to_merged_label[elem['label']],\n pixels=elem['pixels'])\n\n base_preprocess_fn = emnist_dataset.create_preprocess_fn(\n num_epochs=num_epochs,\n batch_size=batch_size,\n shuffle_buffer_size=shuffle_buffer_size)\n\n def preprocess_fn(dataset: tf.data.Dataset):\n if merge_case:\n dataset = dataset.map(merge_mapping)\n if use_cache:\n dataset = dataset.cache()\n dataset = base_preprocess_fn(dataset)\n if use_prefetch:\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset\n\n return preprocess_fn # pytype: disable=bad-return-type",
"def _preprocess_fn(data):\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data",
"def _create_preprocess_fn(\n num_epochs: int,\n batch_size: int,\n shuffle_buffer_size: int = _NUM_EXAMPLES_PER_CLIENT,\n use_cache: bool = True,\n use_prefetch: bool = True,\n) -> Callable[[tf.data.Dataset], tf.data.Dataset]:\n base_preprocess_fn = cifar100_dataset.create_preprocess_fn(\n num_epochs=num_epochs,\n batch_size=batch_size,\n crop_shape=_CROP_SHAPE,\n distort_image=_DISTORT_TRAIN_IMAGES,\n # Set buffer to 1 to disable shuffling since is not necessary for eval.\n shuffle_buffer_size=shuffle_buffer_size)\n\n def preprocess_fn(dataset: tf.data.Dataset):\n if use_cache:\n dataset = dataset.cache()\n dataset = base_preprocess_fn(dataset)\n if use_prefetch:\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset\n\n return preprocess_fn",
"def _get_batch_fn(dataset):\n def get_batch(idx):\n x_bat = dataset['input'][idx]\n y_bat = dataset['label'][idx]\n x_bat, y_bat = preprocess(x_bat, y_bat)\n\n return x_bat, y_bat\n\n return get_batch",
"def preprocess(file_path, model_preprocess_function):\n img = image.load_img(file_path, target_size=(224, 224))\n x = image.img_to_array(img)\n # x = np.expand_dims(x, axis=0)\n x = model_preprocess_function(x)\n return x",
"def preprocessing_fn(inputs: Dict[str, Union[tf.Tensor, tf.SparseTensor]]) -> Dict[str, tf.Tensor]:\n outputs = {}\n\n image_features = tf.map_fn(\n lambda x: tf.io.decode_jpeg(x[0], channels=3),\n inputs[constants.IMAGE_KEY],\n dtype=tf.uint8\n )\n\n # image_features = tf.cast(image_features, tf.float32)\n image_features = tf.image.resize(image_features, [constants.HEIGHT, constants.WIDTH])\n image_features = tf.keras.applications.efficientnet.preprocess_input(image_features)\n\n outputs[_transformed_name(constants.IMAGE_KEY)] = image_features\n # TODO(b/157064428): Support label transformation for Keras.\n # Do not apply label transformation as it will result in wrong evaluation.\n outputs[_transformed_name(constants.LABEL_KEY)] = inputs[constants.LABEL_KEY]\n return outputs",
"def process_data(image, label):\n # https://www.tensorflow.org/api_docs/python/tf/numpy_function\n # Given a python function func wrap this function as an operation in a TensorFlow function.\n # func must take numpy arrays as its arguments and return numpy arrays as its outputs.\n # Comparison to tf.py_function: tf.py_function and tf.numpy_function are very similar, \n # except that tf.numpy_function takes numpy arrays, and not tf.Tensors. \n # If you want the function to contain tf.Tensors, and have any TensorFlow operations executed in the function be differentiable, \n # please use tf.py_function.\n aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)\n return aug_img, label",
"def preprocess(config: Config) -> None:\n print(colored(\"preprocessing:\", attrs=[\"bold\"]))\n factory = PreprocessingFactory()\n factory.process(config)",
"def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')",
"def preprocess(image, gt_image, height, width):\n\n # Convert the image dtypes to tf.float32 if needed\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n # Convert the image dtypes to tf.int32 if needed\n if gt_image.dtype != tf.int32:\n gt_image = tf.image.convert_image_dtype(gt_image, dtype=tf.int32)\n\n '''# Compute number of pixels needed to pad images\n # in order to respect FCN factor requirement\n top, bottom, left, right = get_paddings(height, width, 32)\n new_height = height + top + bottom\n new_width = width + left + right\n\n # Pad images if necessary\n image = tf.image.resize_image_with_crop_or_pad(image, new_height, new_width)\n gt_image = tf.image.resize_image_with_crop_or_pad(gt_image, new_height, new_width)\n '''\n\n # Subtract off the mean and divide by the variance of the pixels\n image = tf.image.per_image_standardization(image)\n\n # Shape TF tensors\n image.set_shape(shape=(height, width, 3))\n gt_image.set_shape(shape=(height, width, 1))\n\n # Dowscale images to save memory and time ;)\n image = tf.image.resize_images(image, size=(256, 256))\n gt_image = tf.squeeze(tf.image.resize_images(gt_image, size=(256, 256)))\n\n # Perform one-hot-encoding on the ground truth image\n label_ohe = one_hot_encode(gt_image)\n\n return image, label_ohe",
"def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label",
"def create():\n with torch.set_grad_enabled(False):\n model = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"vgg11\", pretrained=True).eval()\n\n with_cuda = torch.cuda.is_available()\n if with_cuda:\n model.to(\"cuda\")\n else:\n logging.warn(\"Running on CPU, no CUDA detected.\")\n\n def call(features):\n images = features[\"image\"].numpy()\n # Normalize according to the documentation. Note that the pro-processing\n # will already have the range normalized to [0, 1].\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n images_normalized = (images - mean) / std\n # Reshape from [batch, h, w, c] -> [batch, c, h, w]\n images_normalized_bchw = np.transpose(\n images_normalized, [0, 3, 1, 2]).astype(np.float32).copy()\n with torch.no_grad():\n images_torch = torch.from_numpy(images_normalized_bchw)\n if with_cuda:\n images_torch = images_torch.to(\"cuda\")\n logits = model(images_torch)\n return torch.nn.functional.softmax(logits, dim=-1).cpu().numpy()\n\n preprocess_config = \"resize_small(256)|central_crop(224)|value_range(0,1)\"\n preprocess_fn = pipeline_builder.get_preprocess_fn(\n preprocess_config, remove_tpu_dtypes=False)\n return call, preprocess_fn",
"def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)",
"def _parse_function(self, example_proto):\n\n # Currently only supports jpeg and png.\n # Need to use this logic because the shape is not known for\n # tf.image.decode_image and we rely on this info to\n # extend label if necessary.\n def _decode_image(content, channels):\n return tf.cond(\n tf.image.is_jpeg(content),\n lambda: tf.image.decode_jpeg(content, channels),\n lambda: tf.image.decode_png(content, channels))\n\n features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/segmentation/class/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/segmentation/class/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n image = _decode_image(parsed_features['image/encoded'], channels=3)\n\n label = None\n if self.split_name != common.TEST_SET:\n label = _decode_image(\n parsed_features['image/segmentation/class/encoded'], channels=1)\n\n image_name = parsed_features['image/filename']\n if image_name is None:\n image_name = tf.constant('')\n\n sample = {\n common.IMAGE: image,\n common.IMAGE_NAME: image_name,\n common.HEIGHT: parsed_features['image/height'],\n common.WIDTH: parsed_features['image/width'],\n }\n\n if label is not None:\n if label.get_shape().ndims == 2:\n label = tf.expand_dims(label, 2)\n elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1:\n pass\n else:\n raise ValueError('Input label shape must be [height, width], or '\n '[height, width, 1].')\n\n label.set_shape([None, None, 1])\n\n sample[common.LABELS_CLASS] = label\n\n return sample",
"def construct_graph(network_class: Type[InferenceNetwork],\n config: Path, checkpoint_dir: str,\n batch_size: int,\n batches_per_step: int,\n image_filenames: Tuple[str],\n loop: bool,\n preprocess_fn: Callable,\n num_ipus: int,\n mode: str,\n save_graph_pb: bool) -> Tuple[tf.Operation, tf.Operation, tf.Operation]:\n # Model specific config\n with open(config.as_posix()) as file_stream:\n try:\n config_dict = yaml.safe_load(file_stream)\n except yaml.YAMLError as exc:\n tf.logging.error(exc)\n\n config_dict['network_name'] = config.stem\n if 'dtype' not in config_dict:\n config_dict[\"dtype\"] = 'float16'\n\n # Create inference optimized frozen graph definition\n network = network_class(input_shape=config_dict[\"input_shape\"],\n num_outputs=1000, batch_size=batch_size,\n data_type=config_dict['dtype'],\n config=config_dict,\n checkpoint_dir=checkpoint_dir)\n\n # Export frozen graph to event file to view in Tensorboard\"\n if save_graph_pb:\n log_dir = Path(f\"{config_dict['network_name']}_graph\")\n graph_filename = f\"{log_dir}/{config_dict['network_name']}_graph.pb\"\n if not log_dir.exists():\n log_dir.mkdir()\n with tf.io.gfile.GFile(graph_filename, \"wb\") as f:\n f.write(network.optimized_graph.SerializeToString())\n logging.info(\"%d ops in the final graph.\" % len(network.optimized_graph.node))\n import_to_tensorboard(graph_filename, log_dir=log_dir.as_posix())\n\n # Reset graph before creating one on the IPU\n tf.reset_default_graph()\n\n # Create dataset\n dataset = get_dataset(image_filenames, batch_size, loop=loop, preprocess_fn=preprocess_fn,\n img_width=config_dict[\"input_shape\"][1],\n img_height=config_dict[\"input_shape\"][0], dtype=config_dict['dtype'])\n\n # Set up graph on device, connect infeed and outfeed to the graph.\n num_replicas = num_ipus if mode == 'replicated' else 1\n infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, device_ordinal=0, feed_name=\"infeed\",\n replication_factor=num_replicas)\n outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(device_ordinal=0, feed_name=\"outfeed\",\n outfeed_mode=ipu_outfeed_queue.IPUOutfeedMode.ALL,\n replication_factor=num_replicas)\n\n def comp_fn():\n def body(img):\n with scopes.ipu_scope('/device:IPU:0'):\n probs = tf.import_graph_def(network.optimized_graph,\n input_map={network.graph_input: img},\n name=\"optimized\",\n return_elements=[network.graph_output])[0]\n outfeed_op = outfeed_queue.enqueue(probs)\n # Note that enqueue happens on the IPU.\n return outfeed_op\n\n return loops.repeat(batches_per_step,\n body,\n [],\n infeed_queue)\n\n loop_op = ipu_compiler.compile(comp_fn, [])\n\n # The dequeue of the outfeed needs to happen on the CPU.\n with tf.device('cpu'):\n outfeed_dequeue = outfeed_queue.dequeue()\n\n ipu_utils.move_variable_initialization_to_cpu()\n return loop_op, infeed_queue.initializer, outfeed_dequeue",
"def _parse_function(filename, label):\n\n raw_input = tf.io.read_file(filename=filename)\n\n image_decoded = tf.image.decode_jpeg(contents=raw_input, channels=3)\n # image_decoded = tf.image.decode_png(contents=raw_input, channels=3)\n # image_decoded = tf.image.decode_image(contents=raw_input)\n\n image_decoded = tf.image.convert_image_dtype(image_decoded, tf.float32)\n # image_decoded = tf.cast(image_decoded, tf.int32)\n\n image_decoded = tf.image.resize_images(images=image_decoded, size=[load_size, load_size],\n method=tf.image.ResizeMethod.AREA,\n align_corners=True)\n\n # image_size = image_decoded.shape.as_list()\n if mode == 'train':\n image_decoded = tf.image.resize_image_with_crop_or_pad(image_decoded, load_size + 4, load_size + 4)\n image_decoded = tf.random_crop(image_decoded, [load_size, load_size, 3])\n image_decoded = tf.image.random_flip_left_right(image_decoded)\n # Brightness/saturation/constrast provides small gains .2%~.5% on cifar.\n image_decoded = tf.image.random_brightness(image_decoded, max_delta=63. / 255.)\n image_decoded = tf.image.random_saturation(image_decoded, lower=0.5, upper=1.5)\n image_decoded = tf.image.random_contrast(image_decoded, lower=0.2, upper=1.8)\n image_decoded = tf.image.per_image_standardization(image_decoded)\n\n return image_decoded, label",
"def _build_image_processing(self, shift_ratio=0):\n with tf.device(self.cpu_device):\n subset = 'train'\n image_producer_ops = []\n image_producer_stages = []\n images_splits, labels_splits = self.image_preprocessor.minibatch(\n self.dataset,\n subset=subset,\n use_datasets=self.params.use_datasets,\n cache_data=self.params.cache_data,\n shift_ratio=shift_ratio)\n images_shape = images_splits[0].get_shape()\n labels_shape = labels_splits[0].get_shape()\n for device_num in range(len(self.devices)):\n image_producer_stages.append(\n data_flow_ops.StagingArea(\n [images_splits[0].dtype, labels_splits[0].dtype],\n shapes=[images_shape, labels_shape]))\n return (image_producer_ops, image_producer_stages)",
"def preprocess_image(self, inputs):\n return utils.preprocess_image(inputs, mode='custom_tf')",
"def create_graph(self, feature, **kwargs):\n self.input_size = feature.shape[1:3]\n\n net = PSPNet101({'data': feature}, is_training=True, num_classes=self.class_num)\n self.pred = net.layers['conv6']\n pred = tf.image.resize_bilinear(self.pred, self.input_size)\n self.output_size = pred.shape[1:3]\n self.output = tf.nn.softmax(pred)",
"def preprocess_image(image, height, width,\n is_training=False,\n bbox=None,\n fast_mode=True,\n add_image_summaries=True):\n if is_training:\n return preprocess_for_train(image, height, width, bbox, fast_mode,\n add_image_summaries=add_image_summaries)\n else:\n return preprocess_for_eval(image, height, width, central_fraction=None)",
"def imagenet_preprocess(image, label):\n i = image\n i = tf.cast(i, tf.float32)\n i = tf.image.resize_with_crop_or_pad(i, 224, 224)\n if model_name == 'ResNet50' or model_name == 'ResNet152':\n i = tf.keras.applications.resnet.preprocess_input(i)\n else:\n i = tf.keras.applications.densenet.preprocess_input(i)\n return (i, label)",
"def process(image):\n pass"
] | [
"0.6597117",
"0.6509189",
"0.6342552",
"0.6341532",
"0.63325167",
"0.6088182",
"0.6045632",
"0.60382193",
"0.6022813",
"0.60152334",
"0.5975242",
"0.59513503",
"0.59479606",
"0.58793926",
"0.58744824",
"0.58571494",
"0.58446604",
"0.5805253",
"0.5803639",
"0.58029455",
"0.57428765",
"0.5699657",
"0.5677763",
"0.5674219",
"0.5663091",
"0.5648002",
"0.5646169",
"0.5619665",
"0.56020457",
"0.559518"
] | 0.69064957 | 0 |
it flushes the queue containing the processed and chosen pages on the csv output file | def save(self):
if len(self.queue):
#logging.info('Saving %d pages' % len(self.queue))
for q in self.queue:
if q and q != '':
if self.output:
print >> self.output, q
else:
print q
self.queue = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __csv_flush_record(self, all=False):\n data_file_name = self.data_file_name\n if all is True:\n data_file_name = data_file_name.split('all')[0] + 'csv'\n\n with io.open(data_file_name, 'a', encoding='utf-8') as data_file:\n if sys.version_info[0] < 3:\n self.__queue_data_writer = UnicodeWriter(data_file)\n else:\n self.__queue_data_writer = csv.writer(data_file)\n self.__queue_data_writer.writerows([row for row in self.__queue])\n data_file.flush()",
"def push_results_to_file(file_name,queue_name, aux_q,queue_service):\n # verify the queues exist\n queue_service.create_queue(queue_name)\n queue_service.create_queue(aux_q)\n # open file for write\n f1=open('./{0}.csv'.format(file_name), 'w+')\n while queue_service.get_queue_metadata(queue_name).approximate_message_count > 0:\n messages = queue_service.get_messages(queue_name,1)\n if len(messages)>0 :\n for message in messages: \n line = '{0},{1},{2}'.format(message.id,message.insertion_time,message.content)\n queue_service.put_message(aux_q,line)\n f1.write(line)\n f1.write('\\n')\n queue_service.delete_message(queue_name, message.id, message.pop_receipt)\n f1.close()",
"def flush(self):\n if self.index < self.bufsize:\n self.writer(\n self.linesep.join(self.read1_batch[0:self.index]),\n self.linesep.join(self.read2_batch[0:self.index]))\n else:\n self.writer(\n self.linesep.join(self.read1_batch),\n self.linesep.join(self.read2_batch))\n self.writer(self.linesep, self.linesep)\n self.index = 0",
"def _queue_csv_writer_job(self, executor, futures_to_cb, fn, description, metadata_storage):\n self._logger.debug('Queueing CSV job to write out {0}...'.format(description))\n futures_to_cb[executor.submit(fn)] = partial(self._process_csv_writer_job, description, metadata_storage)",
"def _drain_queue(self):\n while self.queue:\n self._export_batch()",
"def export_files(self):\n # Clear the progress bars\n self.scanFilesProgressBar.setValue(0)\n self.scanFilesProgressBar.setMaximum(len(self.analzye_results))\n\n # Reset the index to start the export process\n self.export_file_index = 0\n\n # Export the file\n # This will start the loop\n # When one thread is complete, a new one will be started\n if len(self.analzye_results) > 0:\n self.export_file(self.analzye_results[0])",
"def run(self):\n page = self.fetch_data(self.url)\n stock_list = self.pop_stock_list(page)\n self.write_csv(stock_list)",
"def writer(Q, filepath):\n \n print(\"Starting Writer\")\n with open(filepath, \"wt\") as out_file:\n tsv_writer = csv.writer(out_file, delimiter='\\t')\n tsv_writer.writerow([\"user_id\", \"items_actioned_on\"])\n \n with open(filepath, \"a\") as out_file:\n tsv_writer = csv.writer(out_file, delimiter='\\t')\n i = 0\n while 1:\n m = Q.get()\n if m == \"kill\":\n print(\"\\nStoping Writer\")\n break\n if len(m) == 2:\n user_id = m[0]\n #print(\"GEt\",user_id)\n _user_truth = m[1]\n tsv_writer.writerow([user_id, f\",\".join(_user_truth) ])\n if i % 1000 == 0:\n print(f\"{i:,} / {GlobalVar.value:,} ({i/GlobalVar.value * 100:.2f})%\", end=\"\\r\", flush=True)\n i+=1",
"def save(self,queue):\r\n filename = \"data\\\\\" + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S') + \".csv\"\r\n idx = 0\r\n\r\n while True:\r\n try:\r\n times,A,B,C,a,b,d,override = queue.get()\r\n\r\n # save in csv\r\n if self._allow_save or override:\r\n if idx == 0:\r\n with open(filename,'w',newline='') as csvfile:\r\n writer = csv.writer(csvfile,delimiter=',')\r\n writer.writerow([\"Time (sec)\",\\\r\n \"ChA (V)\",\"ChB (V)\",\"ChC (V)\",\\\r\n var1(),var2(),var3()])\r\n with open(filename,'a',newline='') as csvfile:\r\n writer = csv.writer(csvfile,delimiter=',')\r\n writer.writerow([str(times[0]),str(A[0]),str(B[0]),str(C[0]),\r\n str(a),str(b),str(d)])\r\n for t,va,vb,vc in zip(times[1:],A[1:],B[1:],C[1:]):\r\n writer.writerow([str(t),str(va),str(vb),str(vc)])\r\n idx += 1\r\n \r\n except:\r\n traceback.print_exc(file=sys.stdout)",
"def _endProgressPrinting(self, nummetrics):\n if not self._quiet:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n logging.info(\"Wrote metrics for {} requests to CSV.\".format(nummetrics))",
"def csv_data():\r\n for row in data:\r\n writer.writerow(row)\r\n csv_data = read_and_flush()\r\n yield csv_data",
"def flush(self):",
"def flush(self):",
"def flush(self):",
"def flush(self):",
"def flush(self):",
"def flush(self):",
"def run(self):\n try:\n with open(SAVE_PATH, \"a\") as f:\n writer = csv.DictWriter(f, fieldnames=CSV_FIELDNAMES)\n if not self.resume_run:\n writer.writeheader()\n for img_dir in os.listdir(FRAMES_PATH):\n for frame_id in os.listdir(os.path.join(FRAMES_PATH, img_dir)):\n file_path = os.path.join(FRAMES_PATH, img_dir, frame_id)\n print(file_path)\n msg = {\"user_id\": \"5\", \"file_path\": file_path, \"count\": 3}\n if msg:\n data = self.message_handler(msg)\n else:\n LOGGER.error(\n \"Some error in consumer: %s\", msg\n )\n if data:\n writer.writerow(data)\n f.flush()\n except KeyboardInterrupt:\n self.close()",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def run(self):\n empty = False\n while not empty:\n try:\n # Grab fields\n url = self.genre_urls.get()\n namestamp = \"{}.csv\".format(str(int(round(time.time() * 1000000))))\n # GET request\n self.logger.info('Attempting to request %s', url)\n self.crawler.set_url(url)\n series = self.crawler.get_series()\n self.logger.info('Attempting to write %s', url)\n # Grab writer -> writes series\n csv_dir = './{}/{}'.format(self.directory, namestamp)\n writer = csv.writer(open(csv_dir, 'wb'))\n writer.writerow(Series.fields)\n for s in series:\n writer.writerow(s.to_line())\n self.logger.info('Wrote %s', namestamp)\n except Exception, e: # pylint: disable=W0703\n print e\n finally:\n self.genre_urls.task_done()\n empty = self.genre_urls.empty()",
"def _write_csv_lists(self):\n with futures.ProcessPoolExecutor(max_workers=self._nworkers) as executor:\n # We use ProcessPoolExecutor as these tasks are CPU intensive. This means that we do not have access\n # to the self._conn, self._metadata_conn and self._logger objects within the functions (they are removed\n # during pickling, see __getstate__)\n #\n futures_to_cb = {}\n md = defaultdict(list)\n self._queue_csv_writer_job(executor, futures_to_cb, self._write_delta_csv_blacklist, 'delta blacklist', md)\n for op in self._operators:\n for fn, desc in \\\n [(self._write_delta_csv_notifications_list, 'delta notifications list for operator {0}'),\n (self._write_delta_csv_exceptions_list, 'delta exceptions list for operator {0}')]:\n self._queue_csv_writer_job(executor, futures_to_cb, partial(fn, op.id), desc.format(op.id), md)\n\n if not self._no_full_lists:\n for fn, desc in [(self._write_full_csv_blacklist, 'full blacklist')]:\n self._queue_csv_writer_job(executor, futures_to_cb, fn, desc, md)\n for op in self._operators:\n for fn, desc in \\\n [(self._write_full_csv_notifications_list, 'full notifications list for operator {0}'),\n (self._write_full_csv_exceptions_list, 'full exceptions list for operator {0}')]:\n self._queue_csv_writer_job(executor, futures_to_cb, partial(fn, op.id), desc.format(op.id), md)\n\n self._wait_for_futures(futures_to_cb)\n\n metadata.add_optional_job_metadata(self._metadata_conn, 'dirbs-listgen', self._run_id, **md)\n\n self._logger.info('Zipping up lists...')\n with zipfile.ZipFile(os.path.join(self._output_dir, '{0}_blacklist.zip'.format(self._date_str)), 'w') as zf:\n for csv_path in glob.glob(os.path.join(self._output_dir, '{0}_blacklist*.csv'.format(self._date_str))):\n zf.write(csv_path, arcname=os.path.basename(csv_path))\n os.remove(csv_path)\n\n for op in self._operators:\n with zipfile.ZipFile(os.path.join(self._output_dir,\n '{0}_notifications_{1}.zip'.format(self._date_str, op.id)), 'w') as zf:\n for csv_path in glob.glob(os.path.join(self._output_dir,\n '{0}_notifications_{1}*.csv'.format(self._date_str, op.id))):\n zf.write(csv_path, arcname=os.path.basename(csv_path))\n os.remove(csv_path)\n with zipfile.ZipFile(os.path.join(self._output_dir,\n '{0}_exceptions_{1}.zip'.format(self._date_str, op.id)), 'w') as zf:\n for csv_path in glob.glob(os.path.join(self._output_dir,\n '{0}_exceptions_{1}*.csv'.format(self._date_str, op.id))):\n zf.write(csv_path, arcname=os.path.basename(csv_path))\n os.remove(csv_path)\n self._logger.info('Zipped up lists')",
"def _process_change(self):\n self._sort_records()\n self._store_writer.to_file(self.records)\n self._store_writer.to_csv_file(self.records)",
"def _flush(self):",
"async def wrap_up_processing_reports(self):\n if hasattr(Config(), 'results'):\n new_row = []\n for item in self.recorded_items:\n item_value = {\n 'global_round':\n self.current_global_round,\n 'round':\n self.current_round,\n 'accuracy':\n self.accuracy * 100,\n 'average_accuracy':\n self.average_accuracy * 100,\n 'edge_agg_num':\n Config().algorithm.local_rounds,\n 'local_epoch_num':\n Config().trainer.epochs,\n 'training_time':\n max([\n report.training_time for (report, __) in self.updates\n ]),\n 'round_time':\n time.perf_counter() - self.round_start_time\n }[item]\n new_row.append(item_value)\n\n if Config().is_edge_server():\n result_csv_file = f'{Config().result_dir}result_{Config().args.id}.csv'\n else:\n result_csv_file = f'{Config().result_dir}result.csv'\n\n csv_processor.write_csv(result_csv_file, new_row)\n\n if Config().is_edge_server():\n # When a certain number of aggregations are completed, an edge client\n # needs to be signaled to send a report to the central server\n if self.current_round == Config().algorithm.local_rounds:\n logging.info(\n '[Server #%d] Completed %s rounds of local aggregation.',\n os.getpid(),\n Config().algorithm.local_rounds)\n self.model_aggregated.set()\n\n self.current_round = 0\n self.new_global_round_begins.clear()\n # Wait until a new global round begins\n # to avoid selecting clients before a new global round begins\n await self.new_global_round_begins.wait()",
"async def _get_records_and_write_to_file(\n commons_url, pages, num_processes, max_concurrent_requests\n):\n max_requests = int(max_concurrent_requests / num_processes)\n logging.debug(f\"max concurrent requests per process: {max_requests}\")\n lock = asyncio.Semaphore(max_requests)\n queue = asyncio.Queue()\n write_to_file_task = asyncio.ensure_future(_parse_from_queue(queue))\n await asyncio.gather(\n *(\n _put_records_from_page_in_queue(page, commons_url, lock, queue)\n for page in pages\n )\n )\n await queue.put(\"DONE\")\n await write_to_file_task",
"def complete_split_tasks(self):\n self._split_pdf(\n int(self.start_page.get()), int(self.end_page.get())) # need to validate input here\n self._clean_up_page_entry()\n self._display_info_texts()",
"def print_chunk_progress(self, actions):\r\n for action in actions:\r\n if (self._cur_print % constants.ES_BULK_CHUNK_SIZE == 0) & (self._cur_print > 0):\r\n print(\"{0} emails converted. Starting bulk import (chunk size: {1})...\".format(self._cur_print,\r\n constants.ES_BULK_CHUNK_SIZE))\r\n self._cur_print += 1\r\n yield action",
"def write_csv(filename, i, q):\n with open(os.path.join(\"Data\", filename), 'a', newline='') as csvfile:\n writ = csv.writer(csvfile)\n j = 0\n k = len(q)\n while j < k:\n l = q.popleft()\n tak = l[0]\n #puts most important/salient points of info for health/phenotype\n #genomes - ident for health genes, weight for phenotype genes -\n #into lists for output\n healthchr_a = []\n healthchr_b = []\n if isinstance(tak.genome, tg.health_genome):\n for a in tak.genome.healthchr_a:\n healthchr_a.append(a.ident)\n for b in tak.genome.healthchr_b:\n healthchr_b.append(b.ident)\n pref = None\n if isinstance(tak.genome, tg.phen_genome):\n pref = [tak.genome.phen_gene_a.weight,\n tak.genome.phen_gene_b.weight,\n tak.pref]\n #first generation has 'str' parents rather than agent parents\n if tak.gen != 0:\n parents0 = tak.parents[0].ident\n parents1 = tak.parents[1].ident\n else:\n parents0 = tak.parents[0]\n parents1 = tak.parents[1]\n writ.writerow([i, l[2], tak.ident, parents0, parents1,\n tak.age, tak.gen, len(tak.children),\n tak.mating_attempts, tak.accum_pain, tak.cod,\n l[1], tak.genome.mut_record, tak.parent_degree,\n tak.parent_genoverlap,\n (tak.genome.disorder_count if \\\n isinstance(tak.genome, tg.health_genome)\\\n else \"\"),\n healthchr_a, healthchr_b, pref])\n j += 1",
"def dump_queue(self):\n self.set_polling_many(self.queue)\n self.queue = []",
"def print_processor(print_que):\n print(termcolor.colored(\"!--DO NOT CLOSE--!\", \"red\"))\n print(len(print_que))\n ID_LIMIT = 40\n run = True\n jobs_ran = 0\n while run:\n Q_Jobs = 0\n if len(print_que) > 0:\n if \"10.56.54.162\" in print_que[0]:\n Q_Jobs = print_status(\"10.56.54.162\")\n else:\n Q_Jobs = print_status(\"10.56.54.156\")\n if Q_Jobs >= ID_LIMIT:\n print(\"Printed so Far: \", str(jobs_ran))\n print(\"Waiting For Jobs to Clear Up\")\n # input(\n # \"Please Confirm Printers Will Support 40 More Job IDS before pressing enter: \")\n jobs_ran = 0\n time.sleep(100)\n continue\n if len(print_que) > 0:\n if(\"banner\" not in print_que[0]):\n os.system(print_que[0])\n print((str(print_que[0]).replace(\n \"C:/Windows/System32/lpr.exe -S 10.56.54.\", \"\").replace(\n '-P PS \"C:/S/SO/', \"\").split(\"-J\")[0]))\n print_que.pop(0)\n jobs_ran += 1\n else:\n print(termcolor.colored(\"\\n!--PROCESSING CAUGHT UP--!: \", \"green\"))\n run = False\n jobs_ran += 1"
] | [
"0.62164724",
"0.61966234",
"0.61901706",
"0.60896474",
"0.60148954",
"0.6007137",
"0.59981954",
"0.5972252",
"0.5877508",
"0.5872668",
"0.58681136",
"0.5849548",
"0.5849548",
"0.5849548",
"0.5849548",
"0.5849548",
"0.5849548",
"0.5731122",
"0.57085747",
"0.5690632",
"0.5645362",
"0.5640488",
"0.56274337",
"0.56140697",
"0.5538929",
"0.5514355",
"0.5506371",
"0.55056274",
"0.54967487",
"0.54762137"
] | 0.6936618 | 0 |
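The save() document in the record above relies on Python 2 print redirection (print >> self.output, q), which no longer runs on Python 3. A minimal Python 3 sketch of the same flush-the-queue pattern follows; it is an illustration, not part of the dataset rows, and the names flush_queue, queue, and output are stand-ins for the record's self.queue and self.output.

def flush_queue(queue, output=None):
    # Write each buffered page to `output` (an open text file) or to stdout,
    # then empty the buffer -- mirrors the save() method above on Python 3.
    if queue:
        for row in queue:
            if row:
                if output is not None:
                    print(row, file=output)  # Python 3 form of `print >> output, row`
                else:
                    print(row)
        queue.clear()  # same effect as reassigning self.queue = []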
fp = Some derivative function of x and y x0 = Current x value y0 = Current y value ts = time step Returns y1 using RungeKutta method | def rk45(fp, x0, y0, ts):
k1 = fp(x0, y0)
k2 = fp(x0 +ts/2, y0+ts/2*k1)
k3 = fp(x0 +ts/2, y0+ts/2*k2)
k4 = fp(x0 +ts, y0+ts*k3)
return y0 + ts/6*(k1+2*k2+2*k3+k4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ret_f(t,y):\n\n f = np.zeros(3)\n f[0] = 77.27*(y(1) - y(0)*y(1)+ y(0)-8.375e-6*y(0)*y(0))\n f[1] = (1.0/77.27)*(-y(1)-y(0)*y(1)+y(2))\n f[2] = 0.161*(y(0)-y(2))\n\n return f",
"def runge_kutta(func, x0, time):\n dt = time[1] - time[0]\n x = np.array(x0)\n val = []\n\n for t in time:\n val.append(x)\n\n k1 = np.array([f(t, x) for f in func])\n k2 = np.array([f(t+dt/2, x+dt*k1/2) for f in func])\n k3 = np.array([f(t+dt/2, x+dt*k2/2) for f in func])\n k4 = np.array([f(t+dt, x+dt*k3) for f in func])\n\n x = x + dt*(k1 + 2*k2 + 2*k3 + k4)/6\n\n return val",
"def dynstall_oye_dxdt(t,fs,u,p):\n alpha = u['alpha'](t)\n f_st = p['F_st'](alpha)\n return 1/p['tau'] * (f_st - fs)",
"def Runge_Kutta_4(diff_fun: Callable[..., float],\\\n x_range: Tuple[float, float, float], initial_value: np.array,\\\n params: Tuple) -> Tuple[np.array, np.array]:\n\n # Extract the time step for ease of use and readability.\n dx = x_range[2]\n # Get the size of the parameter vector.\n vec_size = len(initial_value)\n # Initialize the output arrays.\n x = np.arange(x_range[0], x_range[1] + dx, dx)\n num_steps = len(x)\n y = np.array([np.zeros(vec_size) for i in range(num_steps)])\n y[0] = initial_value\n\n # Numerically compute the differential equation's parameters over the given \n # range.\n for n in range(1, num_steps):\n k1 = step_1(diff_fun, x[n-1], y[n-1], params, dx)\n k2 = step_2(diff_fun, x[n-1], y[n-1], params, dx, k1)\n k3 = step_3(diff_fun, x[n-1], y[n-1], params, dx, k2)\n k4 = step_4(diff_fun, x[n-1], y[n-1], params, dx, k3)\n y[n] = y[n-1] + ((k1 + (2 * k2) + (2 * k3) + k4) / 6)\n\n return x, y",
"def dK_dtheta(self,X,X2,target):\r\n if X2 is None: X2 = X\r\n FX = np.column_stack([f(X) for f in self.F])\r\n FX2 = np.column_stack([f(X2) for f in self.F])\r\n DER = np.zeros((self.n,self.n,self.n))\r\n for i in range(self.n):\r\n DER[i,i,i] = np.sqrt(self.weights[i])\r\n dw = self.variance * mdot(FX,DER,self.G_1,np.diag(np.sqrt(self.weights)),FX2.T)\r\n dv = mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T)\r\n np.add(target[:,:,0],np.transpose(dv,(0,2,1)), target[:,:,0])\r\n np.add(target[:,:,1:],np.transpose(dw,(0,2,1)), target[:,:,1:])",
"def rate(self, t, y):\n k1, k2 = self._k(y[-1])\n if y[1] > 1e-6:\n dydt = [(self.parameters.y1 * k1 + self.parameters.y2 * k2) * y[1],\n -(k1 + k2) * y[1]]\n else:\n dydt = [0, 0]\n return dydt",
"def f1d(t,y,float_params,sigmaI): #sigmastep is an array\n \n ## y is Ntot0 ##\n\n # unpack parameters\n Nbar, Nstar, sigma0, nu_kin_mlyperus, DoverdeltaX2 = float_params \n\n # Ntot is passed in, Fqll calculated from Ntot\n Ntot0 = np.ascontiguousarray(y)\n Nqll0 = Nbar - Nstar * np.sin(2*np.pi*(Ntot0))\n\n # Calc surface deposition, dNtot_dt before diffusion\n m = (Nqll0 - (Nbar - Nstar))/(2*Nstar)\n sigmaM = (sigmaI - m * sigma0)/(1+m*sigma0)\n depsurf = nu_kin_mlyperus * sigmaM\n dNtot_dt = depsurf\n\n # Diffusion\n dy = diffuse_1d(Nqll0,DoverdeltaX2)\n dNtot_dt += dy \n\n # Package for output, only values of dNtot\n derivs = dNtot_dt\n return derivs",
"def cal_f_RK(yt, dyt, f, df, int_INV_D_pre, vw_div_vw0, fcn_D, cond_GT):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n\n y_new = yt + dyt\n f_new = f + df\n int_INV_D = int_INV_D_pre\n if df != 0.: # it is related with half-step for RK4 method\n int_INV_D += (dyt/2.)*(1./fcn_D(f, cond_GT) + 1./fcn_D(f_new, cond_GT))\n return (-1./ed)*(vw_div_vw0/fcn_D(f_new, cond_GT))*(f_new - phi_b*(1. - exp(-(vw_div_vw0/ed)*int_INV_D)))",
"def dK_dtheta(self, dL_dK, X, X2, target):\r\n\r\n X,slices = X[:,:-1],index_to_slices(X[:,-1])\r\n if X2 is None:\r\n X2,slices2 = X,slices\r\n else:\r\n X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1])\r\n #rdist = X[:,0][:,None] - X2[:,0][:,None].T\r\n rdist = X - X2.T\r\n ly=1/self.lengthscaleY\r\n lu=np.sqrt(3)/self.lengthscaleU\r\n\r\n rd=rdist.shape[0]\r\n dktheta1 = np.zeros([rd,rd])\r\n dktheta2 = np.zeros([rd,rd])\r\n dkUdvar = np.zeros([rd,rd])\r\n dkYdvar = np.zeros([rd,rd])\r\n\r\n # dk dtheta for UU\r\n UUdtheta1 = lambda dist: np.exp(-lu* dist)*dist + (-dist)*np.exp(-lu* dist)*(1+lu*dist)\r\n UUdtheta2 = lambda dist: 0\r\n #UUdvar = lambda dist: (1 + lu*dist)*np.exp(-lu*dist)\r\n UUdvar = lambda dist: (1 + lu* np.abs(dist)) * np.exp(-lu * np.abs(dist))\r\n\r\n # dk dtheta for YY\r\n\r\n dk1theta1 = lambda dist: np.exp(-ly*dist)*2*(-lu)/(lu+ly)**3\r\n #c=np.sqrt(3)\r\n #t1=c/lu\r\n #t2=1/ly\r\n #dk1theta1=np.exp(-dist*ly)*t2*( (2*c*t2+2*t1)/(c*t2+t1)**2 -2*(2*c*t2*t1+t1**2)/(c*t2+t1)**3 )\r\n\r\n dk2theta1 = lambda dist: 1*(\r\n np.exp(-lu*dist)*dist*(-ly+2*lu-lu*ly*dist+dist*lu**2)*(ly-lu)**(-2) + np.exp(-lu*dist)*(-2+ly*dist-2*dist*lu)*(ly-lu)**(-2)\r\n +np.exp(-dist*lu)*(ly-2*lu+ly*lu*dist-dist*lu**2)*2*(ly-lu)**(-3)\r\n +np.exp(-dist*ly)*2*(ly-lu)**(-2)\r\n +np.exp(-dist*ly)*2*(2*lu-ly)*(ly-lu)**(-3)\r\n )\r\n\r\n dk3theta1 = lambda dist: np.exp(-dist*lu)*(lu+ly)**(-2)*((2*lu+ly+dist*lu**2+lu*ly*dist)*(-dist-2/(lu+ly))+2+2*lu*dist+ly*dist)\r\n\r\n #dktheta1 = lambda dist: self.varianceU*self.varianceY*(dk1theta1+dk2theta1+dk3theta1)\r\n\r\n\r\n\r\n\r\n dk1theta2 = lambda dist: np.exp(-ly*dist) * ((lu+ly)**(-2)) * ( (-dist)*(2*lu+ly) + 1 + (-2)*(2*lu+ly)/(lu+ly) )\r\n\r\n dk2theta2 =lambda dist: 1*(\r\n np.exp(-dist*lu)*(ly-lu)**(-2) * ( 1+lu*dist+(-2)*(ly-2*lu+lu*ly*dist-dist*lu**2)*(ly-lu)**(-1) )\r\n +np.exp(-dist*ly)*(ly-lu)**(-2) * ( (-dist)*(2*lu-ly) -1+(2*lu-ly)*(-2)*(ly-lu)**(-1) )\r\n )\r\n\r\n dk3theta2 = lambda dist: np.exp(-dist*lu) * (-3*lu-ly-dist*lu**2-lu*ly*dist)/(lu+ly)**3\r\n\r\n #dktheta2 = lambda dist: self.varianceU*self.varianceY*(dk1theta2 + dk2theta2 +dk3theta2)\r\n\r\n # kyy kernel\r\n #k1 = lambda dist: np.exp(-ly*dist)*(2*lu+ly)/(lu+ly)**2\r\n #k2 = lambda dist: (np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly) ) / (ly-lu)**2\r\n #k3 = lambda dist: np.exp(-lu*dist) * ( (1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2 )\r\n k1 = lambda dist: np.exp(-ly*dist)*(2*lu+ly)/(lu+ly)**2\r\n k2 = lambda dist: (np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly) ) / (ly-lu)**2\r\n k3 = lambda dist: np.exp(-lu*dist) * ( (1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2 )\r\n #dkdvar = k1+k2+k3\r\n\r\n #cross covariance kernel\r\n kyu3 = lambda dist:np.exp(-lu*dist)/(lu+ly)*(1+lu*(dist+1/(lu+ly)))\r\n\r\n # dk dtheta for UY\r\n dkcrtheta2 = lambda dist: np.exp(-lu*dist) * ( (-1)*(lu+ly)**(-2)*(1+lu*dist+lu*(lu+ly)**(-1)) + (lu+ly)**(-1)*(-lu)*(lu+ly)**(-2) )\r\n dkcrtheta1 = lambda dist: np.exp(-lu*dist)*(lu+ly)**(-1)* ( (-dist)*(1+dist*lu+lu*(lu+ly)**(-1)) - (lu+ly)**(-1)*(1+dist*lu+lu*(lu+ly)**(-1)) +dist+(lu+ly)**(-1)-lu*(lu+ly)**(-2) )\r\n #dkuyp dtheta\r\n #dkuyp dtheta1 = self.varianceU*self.varianceY* (dk1theta1() + dk2theta1())\r\n #dkuyp dtheta2 = self.varianceU*self.varianceY* (dk1theta2() + dk2theta2())\r\n #dkuyp dVar = k1() + k2()\r\n\r\n\r\n #dkyup dtheta\r\n #dkyun dtheta1 = self.varianceU*self.varianceY* (dk1theta1() + dk2theta1())\r\n #dkyun dtheta2 = self.varianceU*self.varianceY* (dk1theta2() + 
dk2theta2())\r\n #dkyup dVar = k1() + k2() #\r\n\r\n\r\n\r\n\r\n for i, s1 in enumerate(slices):\r\n for j, s2 in enumerate(slices2):\r\n for ss1 in s1:\r\n for ss2 in s2:\r\n if i==0 and j==0:\r\n #target[ss1,ss2] = kuu(np.abs(rdist[ss1,ss2]))\r\n dktheta1[ss1,ss2] = self.varianceU*self.varianceY*UUdtheta1(np.abs(rdist[ss1,ss2]))\r\n dktheta2[ss1,ss2] = 0\r\n dkUdvar[ss1,ss2] = UUdvar(np.abs(rdist[ss1,ss2]))\r\n dkYdvar[ss1,ss2] = 0\r\n elif i==0 and j==1:\r\n #target[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kuyp(np.abs(rdist[ss1,ss2])), kuyn(np.abs(rdist[s1[0],s2[0]]) ) )\r\n #dktheta1[ss1,ss2] =\r\n #dktheta2[ss1,ss2] =\r\n #dkdvar[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kuyp(np.abs(rdist[ss1,ss2])), kuyn(np.abs(rdist[s1[0],s2[0]]) ) )\r\n dktheta1[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , dkcrtheta1(np.abs(rdist[ss1,ss2])) ,self.varianceU*self.varianceY*(dk1theta1(np.abs(rdist[ss1,ss2]))+dk2theta1(np.abs(rdist[ss1,ss2]))) )\r\n dktheta2[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , dkcrtheta2(np.abs(rdist[ss1,ss2])) ,self.varianceU*self.varianceY*(dk1theta2(np.abs(rdist[ss1,ss2]))+dk2theta2(np.abs(rdist[ss1,ss2]))) )\r\n dkUdvar[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kyu3(np.abs(rdist[ss1,ss2])) ,k1(np.abs(rdist[ss1,ss2]))+k2(np.abs(rdist[ss1,ss2])) )\r\n dkYdvar[ss1,ss2] = dkUdvar[ss1,ss2]\r\n elif i==1 and j==1:\r\n #target[ss1,ss2] = kyy(np.abs(rdist[ss1,ss2]))\r\n dktheta1[ss1,ss2] = self.varianceU*self.varianceY*(dk1theta1(np.abs(rdist[ss1,ss2]))+dk2theta1(np.abs(rdist[ss1,ss2]))+dk3theta1(np.abs(rdist[ss1,ss2])))\r\n dktheta2[ss1,ss2] = self.varianceU*self.varianceY*(dk1theta2(np.abs(rdist[ss1,ss2])) + dk2theta2(np.abs(rdist[ss1,ss2])) +dk3theta2(np.abs(rdist[ss1,ss2])))\r\n dkUdvar[ss1,ss2] = (k1(np.abs(rdist[ss1,ss2]))+k2(np.abs(rdist[ss1,ss2]))+k3(np.abs(rdist[ss1,ss2])) )\r\n dkYdvar[ss1,ss2] = dkUdvar[ss1,ss2]\r\n else:\r\n #target[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kyup(np.abs(rdist[ss1,ss2])), kyun(np.abs(rdist[s1[0],s2[0]]) ) )\r\n dktheta1[ss1,ss2] = np.where( rdist[ss1,ss2]>0 ,self.varianceU*self.varianceY*(dk1theta1(np.abs(rdist[ss1,ss2]))+dk2theta1(np.abs(rdist[ss1,ss2]))) , dkcrtheta1(np.abs(rdist[ss1,ss2])) )\r\n dktheta2[ss1,ss2] = np.where( rdist[ss1,ss2]>0 ,self.varianceU*self.varianceY*(dk1theta2(np.abs(rdist[ss1,ss2]))+dk2theta2(np.abs(rdist[ss1,ss2]))) , dkcrtheta2(np.abs(rdist[ss1,ss2])) )\r\n dkUdvar[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , k1(np.abs(rdist[ss1,ss2]))+k2(np.abs(rdist[ss1,ss2])), kyu3(np.abs(rdist[ss1,ss2])) )\r\n dkYdvar[ss1,ss2] = dkUdvar[ss1,ss2]\r\n\r\n\r\n target[0] += np.sum(self.varianceY*dkUdvar * dL_dK)\r\n target[1] += np.sum(self.varianceU*dkYdvar * dL_dK)\r\n target[2] += np.sum(dktheta1*(-np.sqrt(3)*self.lengthscaleU**(-2)) * dL_dK)\r\n target[3] += np.sum(dktheta2*(-self.lengthscaleY**(-2)) * dL_dK)",
"def EstimateKFTimeStep(u1,y1,z0,Xxd,Xud,Yx,Yu,P0,Q,R):\n \n # estimate next step\n z1m = Xxd.dot(z0) + Xud.dot(u1)\n y1hat = Yx.dot(z1m) + Yu.dot(u1)\n P1m = (Xxd.dot(P0)).dot(Xxd.T) + Q\n \n # Calculate Kalman gain\n # same as Lk from [1] - And their Rtilde_k is G*P1m*G'+R\n Kk = np.dot(P1m,Yx.T).dot( np.linalg.inv(((Yx.dot(P1m)).dot(Yx.T) + R))) \n # update estimate with measurement\n z1 = z1m + Kk.dot(y1 - y1hat)\n \n P1 = (np.eye(Xxd.shape[0]) - Kk.dot(Yx) ).dot(P1m)\n return z1,P1,Kk",
"def dK_dtheta(self, dL_dK, X, X2, target):\r\n if X2 is None: X2 = X\r\n dist = np.abs(X - X2.T)\r\n\r\n ly=1/self.lengthscaleY\r\n lu=np.sqrt(3)/self.lengthscaleU\r\n #ly=self.lengthscaleY\r\n #lu=self.lengthscaleU\r\n\r\n dk1theta1 = np.exp(-ly*dist)*2*(-lu)/(lu+ly)**3\r\n #c=np.sqrt(3)\r\n #t1=c/lu\r\n #t2=1/ly\r\n #dk1theta1=np.exp(-dist*ly)*t2*( (2*c*t2+2*t1)/(c*t2+t1)**2 -2*(2*c*t2*t1+t1**2)/(c*t2+t1)**3 )\r\n \r\n dk2theta1 = 1*( \r\n np.exp(-lu*dist)*dist*(-ly+2*lu-lu*ly*dist+dist*lu**2)*(ly-lu)**(-2) + np.exp(-lu*dist)*(-2+ly*dist-2*dist*lu)*(ly-lu)**(-2) \r\n +np.exp(-dist*lu)*(ly-2*lu+ly*lu*dist-dist*lu**2)*2*(ly-lu)**(-3) \r\n +np.exp(-dist*ly)*2*(ly-lu)**(-2)\r\n +np.exp(-dist*ly)*2*(2*lu-ly)*(ly-lu)**(-3)\r\n )\r\n \r\n dk3theta1 = np.exp(-dist*lu)*(lu+ly)**(-2)*((2*lu+ly+dist*lu**2+lu*ly*dist)*(-dist-2/(lu+ly))+2+2*lu*dist+ly*dist)\r\n\r\n dktheta1 = self.varianceU*self.varianceY*(dk1theta1+dk2theta1+dk3theta1)\r\n\r\n\r\n\r\n\r\n dk1theta2 = np.exp(-ly*dist) * ((lu+ly)**(-2)) * ( (-dist)*(2*lu+ly) + 1 + (-2)*(2*lu+ly)/(lu+ly) )\r\n\r\n dk2theta2 = 1*(\r\n np.exp(-dist*lu)*(ly-lu)**(-2) * ( 1+lu*dist+(-2)*(ly-2*lu+lu*ly*dist-dist*lu**2)*(ly-lu)**(-1) )\r\n +np.exp(-dist*ly)*(ly-lu)**(-2) * ( (-dist)*(2*lu-ly) -1+(2*lu-ly)*(-2)*(ly-lu)**(-1) )\r\n )\r\n\r\n dk3theta2 = np.exp(-dist*lu) * (-3*lu-ly-dist*lu**2-lu*ly*dist)/(lu+ly)**3\r\n\r\n dktheta2 = self.varianceU*self.varianceY*(dk1theta2 + dk2theta2 +dk3theta2)\r\n\r\n\r\n\r\n k1 = np.exp(-ly*dist)*(2*lu+ly)/(lu+ly)**2\r\n k2 = (np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly) ) / (ly-lu)**2 \r\n k3 = np.exp(-lu*dist) * ( (1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2 )\r\n dkdvar = k1+k2+k3\r\n \r\n #target[0] dk dvarU\r\n #target[1] dk dvarY\r\n #target[2] dk d theta1\r\n #target[3] dk d theta2 \r\n target[0] += np.sum(self.varianceY*dkdvar * dL_dK)\r\n target[1] += np.sum(self.varianceU*dkdvar * dL_dK)\r\n target[2] += np.sum(dktheta1*(-np.sqrt(3)*self.lengthscaleU**(-2)) * dL_dK)\r\n target[3] += np.sum(dktheta2*(-self.lengthscaleY**(-2)) * dL_dK)",
"def derv(self, t, y):\n x = y[0];\n xc = y[1];\n n = y[2];\n\n Bhat = self.G * (1.0 - n) * self.alpha0(t) * (1 - 0.4 * x) * (1 - 0.4 * xc);\n\n dydt = np.zeros(3)\n\n dydt[0] = sp.pi / 12.0 * (xc + Bhat);\n dydt[1] = sp.pi / 12.0 * (self.mu * (xc - 4.0 / 3.0 * pow(xc, 3.0)) - x * (\n pow(24.0 / (0.99669 * self.taux), 2.0) + self.kparam * Bhat));\n dydt[2] = 60.0 * (self.alpha0(t) * (1.0 - n) - self.delta * n);\n\n return (dydt)",
"def dK_dtheta(self,dL_dK,X,X2,target):\r\n if X2 is None: X2 = X\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)), self.a[1]*self.basis_omega, self.a[2]*self.basis_omega**2, self.a[3]*self.basis_omega**3))\r\n Lo = np.column_stack((self.basis_omega, self.basis_omega, self.basis_omega, self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi, self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n F2lower = np.array(self._cos(self.basis_alpha*self.basis_omega**2,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-3*self.a[0]/self.lengthscale, -2*self.a[1]/self.lengthscale, -self.a[2]/self.lengthscale, 0.]\r\n db_dlen = [0., 4*self.b[1]/self.lengthscale, 2*self.b[2]/self.lengthscale, 2*self.b[3]/self.lengthscale, 2*self.b[4]/self.lengthscale]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)), da_dlen[1]*self.basis_omega, da_dlen[2]*self.basis_omega**2, da_dlen[3]*self.basis_omega**3))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dlower_terms_dlen = db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F2lower,F2lower.T) + db_dlen[2]*np.dot(F1lower,F1lower.T) + db_dlen[3]*np.dot(F2lower,Flower.T) + db_dlen[4]*np.dot(Flower,F2lower.T)\r\n dG_dlen = 15*self.lengthscale**4/(400*np.sqrt(5))*Gint + 3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dlen + dlower_terms_dlen\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX2.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n dFX2_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X2,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X2)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period, -self.a[3]*self.basis_omega**4/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2,self.basis_phi))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n 
#IPPprim2[0,0] = 2*(self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n #IPPint2[0,0] = (self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period, -3*self.a[3]*self.basis_omega**3/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))\r\n r2,omega2,phi2 = self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF2lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**3/self.period,self.basis_omega,self.basis_phi+np.pi*3/2)(self.lower) + self._cos(-2*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]\r\n\r\n dlower_terms_dper = self.b[0] * (np.dot(dFlower_dper,Flower.T) + np.dot(Flower.T,dFlower_dper))\r\n dlower_terms_dper += self.b[1] * (np.dot(dF2lower_dper,F2lower.T) + np.dot(F2lower,dF2lower_dper.T)) - 4*self.b[1]/self.period*np.dot(F2lower,F2lower.T)\r\n dlower_terms_dper += self.b[2] * (np.dot(dF1lower_dper,F1lower.T) + np.dot(F1lower,dF1lower_dper.T)) - 2*self.b[2]/self.period*np.dot(F1lower,F1lower.T)\r\n dlower_terms_dper += self.b[3] * (np.dot(dF2lower_dper,Flower.T) + np.dot(F2lower,dFlower_dper.T)) - 2*self.b[3]/self.period*np.dot(F2lower,Flower.T)\r\n dlower_terms_dper += self.b[4] * (np.dot(dFlower_dper,F2lower.T) + np.dot(Flower,dF2lower_dper.T)) - 2*self.b[4]/self.period*np.dot(Flower,F2lower.T)\r\n\r\n dG_dper = 1./self.variance*(3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dper + 0.5*dlower_terms_dper)\r\n dK_dper = mdot(dFX_dper,self.Gi,FX2.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX2.T) + mdot(FX,self.Gi,dFX2_dper.T)\r\n\r\n # np.add(target[:,:,0],dK_dvar, target[:,:,0])\r\n target[0] += np.sum(dK_dvar*dL_dK)\r\n #np.add(target[:,:,1],dK_dlen, target[:,:,1])\r\n target[1] += np.sum(dK_dlen*dL_dK)\r\n #np.add(target[:,:,2],dK_dper, target[:,:,2])\r\n target[2] += np.sum(dK_dper*dL_dK)",
"def step_2(f: Callable[..., float], x: float, y: np.array, params: Tuple,\\\n h: float, k1: np.array) -> np.array:\n\n # Initialize the output vector.\n n = len(y)\n y_int = np.zeros(n)\n\n # Find dym/dx using the given function, then use it to compute dym-1/dx.\n y_int[0] = f(x + (h / 2), y + (k1 / 2), *params) * h\n\n # Starting with dym-1/dx, compute the other values down to y/dx.\n for i in range(1, n):\n y_int[i] = (y[n-i] + (k1[n-i] / 2)) * h\n\n # Reverse the output vector so y/dx is on top.\n y_int = np.flipud(y_int)\n\n return y_int",
"def _dK_ode_dtheta(self, target):\r\n t_ode = self._t[self._index>0]\r\n dL_dK_ode = self._dL_dK[self._index>0, :]\r\n index_ode = self._index[self._index>0]-1\r\n if self._t2 is None:\r\n if t_ode.size==0:\r\n return \r\n t2_ode = t_ode\r\n dL_dK_ode = dL_dK_ode[:, self._index>0]\r\n index2_ode = index_ode\r\n else:\r\n t2_ode = self._t2[self._index2>0]\r\n dL_dK_ode = dL_dK_ode[:, self._index2>0]\r\n if t_ode.size==0 or t2_ode.size==0:\r\n return\r\n index2_ode = self._index2[self._index2>0]-1\r\n\r\n h1 = self._compute_H(t_ode, index_ode, t2_ode, index2_ode, stationary=self.is_stationary, update_derivatives=True)\r\n #self._dK_ddelay = self._dh_ddelay\r\n self._dK_dsigma = self._dh_dsigma\r\n\r\n if self._t2 is None:\r\n h2 = h1\r\n else:\r\n h2 = self._compute_H(t2_ode, index2_ode, t_ode, index_ode, stationary=self.is_stationary, update_derivatives=True)\r\n\r\n #self._dK_ddelay += self._dh_ddelay.T\r\n self._dK_dsigma += self._dh_dsigma.T\r\n # C1 = self.sensitivity\r\n # C2 = self.sensitivity\r\n\r\n # K = 0.5 * (h1 + h2.T)\r\n # var2 = C1*C2\r\n # if self.is_normalized:\r\n # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + sum(sum(dL_dK.*dh2_dD1.T)))*0.5*var2\r\n # dk_dD2 = (sum(sum(dL_dK.*dh1_dD2)) + sum(sum(dL_dK.*dh2_dD2.T)))*0.5*var2\r\n # dk_dsigma = 0.5 * var2 * sum(sum(dL_dK.*dK_dsigma))\r\n # dk_dC1 = C2 * sum(sum(dL_dK.*K))\r\n # dk_dC2 = C1 * sum(sum(dL_dK.*K))\r\n # else:\r\n # K = np.sqrt(np.pi) * K\r\n # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + * sum(sum(dL_dK.*K))\r\n # dk_dC2 = self.sigma * C1 * sum(sum(dL_dK.*K))\r\n\r\n\r\n # dk_dSim1Variance = dk_dC1\r\n # Last element is the length scale.\r\n (dL_dK_ode[:, :, None]*self._dh_ddelay[:, None, :]).sum(2)\r\n\r\n target[-1] += (dL_dK_ode*self._dK_dsigma/np.sqrt(2)).sum()\r\n\r\n\r\n # # only pass the gradient with respect to the inverse width to one\r\n # # of the gradient vectors ... otherwise it is counted twice.\r\n # g1 = real([dk_dD1 dk_dinvWidth dk_dSim1Variance])\r\n # g2 = real([dk_dD2 0 dk_dSim2Variance])\r\n # return g1, g2\"\"\"\r",
"def rk4(f, y, t, dt):\n k1 = f(y, t)\n k2 = f(y + 0.5 * k1 * dt, t + 0.5 * dt)\n k3 = f(y + 0.5 * k2 * dt, t + 0.5 * dt)\n k4 = f(y + k3 * dt, t + dt)\n\n res = y + float(1) / 6 * dt * (k1 + 2 * k2 + 2 * k3 + k4)\n return res",
"def dK_dtheta(self,dL_dK,X,X2,target):\r\n if X2 is None: X2 = X\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega,self.a[2]*self.basis_omega**2))\r\n Lo = np.column_stack((self.basis_omega,self.basis_omega,self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-6/self.lengthscale**3,-2*np.sqrt(3)/self.lengthscale**2,0.]\r\n db_dlen = [0.,2*self.lengthscale/3.]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega,da_dlen[2]*self.basis_omega**2))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dG_dlen = self.lengthscale**2/(4*np.sqrt(3))*Gint + self.lengthscale**3/(12*np.sqrt(3))*dGint_dlen + db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F1lower,F1lower.T)\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX2.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n dFX2_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X2,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X2)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n #IPPprim2[0,0] = 2*(self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 
1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n #IPPint2[0,0] = (self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r2,omega2,phi2 = self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n dG_dper = 1./self.variance*(self.lengthscale**3/(12*np.sqrt(3))*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)) + self.b[1]*(np.dot(dF1lower_dper,F1lower.T)+np.dot(F1lower,dF1lower_dper.T)))\r\n\r\n dK_dper = mdot(dFX_dper,self.Gi,FX2.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX2.T) + mdot(FX,self.Gi,dFX2_dper.T)\r\n\r\n # np.add(target[:,:,0],dK_dvar, target[:,:,0])\r\n target[0] += np.sum(dK_dvar*dL_dK)\r\n #np.add(target[:,:,1],dK_dlen, target[:,:,1])\r\n target[1] += np.sum(dK_dlen*dL_dK)\r\n #np.add(target[:,:,2],dK_dper, target[:,:,2])\r\n target[2] += np.sum(dK_dper*dL_dK)",
"def dK_dtheta(self,dL_dK,X,X2,target):\r\n if X2 is None: X2 = X\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega))\r\n Lo = np.column_stack((self.basis_omega,self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-1./self.lengthscale**2,0.]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dG_dlen = 1./2*Gint + self.lengthscale/2*dGint_dlen\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX2.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n dFX2_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X2,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X2)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n #IPPprim2[0,0] = 2*(self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n #IPPint2[0,0] = (self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2))\r\n r2,omega2,phi2 = dLa_dper2.T,Lo[:,0:1],dLp_dper2.T\r\n\r\n 
dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n dG_dper = 1./self.variance*(self.lengthscale/2*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)))\r\n\r\n dK_dper = mdot(dFX_dper,self.Gi,FX2.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX2.T) + mdot(FX,self.Gi,dFX2_dper.T)\r\n\r\n target[0] += np.sum(dK_dvar*dL_dK)\r\n target[1] += np.sum(dK_dlen*dL_dK)\r\n target[2] += np.sum(dK_dper*dL_dK)",
"def f(self, x , u , t = 0 ):\n y = x[0]\n dy = x[1]\n \n \n ddy = - y + self.mu * dy * ( 1 - y**2 )\n \n dx = np.zeros(self.n) # State derivative vector\n \n dx[0] = dy\n dx[1] = ddy\n \n return dx",
"def f(y):\n \n\n k = 1.0\n return y*(1-y)",
"def cost_derivative(self,output_results,y):\r\n\t\treturn (output_results-y)",
"def _derivativeTerm(self):\n\n\t\treturn self._Kd * (self._getErrorFunction() - self._previousError) / self._dt",
"def runge_kutta(self, y_n, t_n, delta):\n k1 = delta*self.y_prime(t_n, y_n)\n k2 = delta*self.y_prime(t_n+delta/2, y_n+k1/2)\n k3 = delta*self.y_prime(t_n+delta/2, y_n+k2/2)\n k4 = delta*self.y_prime(t_n+delta, y_n+k3)\n return y_n + (k1 + 2*(k2+k3) + k4)/6 #, t_n+delta",
"def test_tan_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.tan(x/y)\n df_dxdy = lambda x, y: -(y/np.cos(x/y)**2 + 2*x*np.tan(x/y)/np.cos(x/y)**2) / y**3\n assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2),\n f.derivative_at( x, {x: 1.5, y:2.5}, order=2)) \n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n df_dxdy(1.5, 2.5))",
"def f( self , x , u , t ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n ################################################\n # Place holder: put the equations of motion here\n raise NotImplementedError\n ################################################\n \n return dx",
"def MyGRRK3_step(f, t, qn, dt, r, e, w):\r\n assert((not np.any(np.isnan(t))) and np.all(np.isfinite(t)) and\r\n np.all(np.isreal(t))), \\\r\n \"t must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(dt))) and np.all(np.isfinite(dt)) and\r\n np.all(np.isreal(dt))), \\\r\n \"dt must be real, finite and not NaN\"\r\n assert(len(qn) == 2), \"qn must have length 2\"\r\n assert(hasattr(f, '__call__')), \\\r\n \"f must be a callable function\"\r\n assert((not np.any(np.isnan(r))) and np.all(np.isfinite(r)) and\r\n np.all(np.isreal(r))), \\\r\n \"r must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(e))) and np.all(np.isfinite(e)) and\r\n np.all(np.isreal(e))), \\\r\n \"e must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(w))) and np.all(np.isfinite(w)) and\r\n np.all(np.isreal(w))), \\\r\n \"w must be real, finite and not NaN\"\r\n\r\n def F(k0):\r\n \"\"\"\r\n Function defines the set of nonlinear equations describing k1 and k2\r\n of the third order explicit Runge-Kutta algorithm\r\n\r\n Parameters\r\n ----------\r\n\r\n k0 : vector\r\n intial guess for roots of the problem\r\n\r\n Returns\r\n -------\r\n\r\n f3 : vector\r\n set of nonlinear equations of k1 and k2\r\n \"\"\"\r\n assert((not np.any(np.isnan(k0))) and np.all(np.isfinite(k0)) and\r\n np.all(np.isreal(k0))),\\\r\n \"k0 must be real, finite and not NaN\"\r\n assert(len(k0) == 4), \"K must have length 4\"\r\n assert(hasattr(F, '__call__')), \\\r\n \"F must be a callable function\"\r\n k1 = np.array([k0[0], k0[1]])\r\n k2 = np.array([k0[2], k0[3]])\r\n f1 = k1 - np.array([f(t + dt / 3,\r\n qn + (dt / 12) * (5 * k1 - k2), r, e, w)])\r\n f2 = k2 - np.array([f(t + dt,\r\n qn + (dt / 4) * (3 * k1 + k2), r, e, w)])\r\n f3 = np.reshape(np.array([f1, f2]), (4,))\r\n return f3\r\n\r\n k0 = np.reshape(np.array([f(t + dt / 3, qn, r, e, w),\r\n f(t + dt, qn, r, e, w)]), (4,))\r\n k = fsolve(F, k0)\r\n k1 = np.array([k[0], k[1]])\r\n k2 = np.array([k[2], k[3]])\r\n qnpG1 = qn + (dt / 4) * (3 * k1 + k2)\r\n return qnpG1",
"def rk2(x,t,h,f):\n\n k1=h*f(x,t)\n k2=h*f(x+k1/2,t+h/2)\n\n return x + k2",
"def two_pt_deriv(tseries):\n dy = tseries.diff(2).shift(-1)\n dx = pd.Series(tseries.index).diff(2).shift(-1)\n return dy.apply(lambda x: x.values / dx.values, axis=0)",
"def f(t,y):\n return np.array([lam*y[0] + (1.0-lam)*np.cos(t) - (1.0+lam)*np.sin(t)])",
"def van_der_pol_oscillator_deriv(x, t):\n \n x1 = x[1]\n y = -nu * (x[0] ** 2.0 - 1.0) * x[1] - x[0]\n result = np.array([x1, y])\n return result"
] | [
"0.6522133",
"0.64265984",
"0.6275279",
"0.62596136",
"0.62343985",
"0.61961025",
"0.61825293",
"0.61632925",
"0.6147124",
"0.6130278",
"0.6122552",
"0.6110482",
"0.61021954",
"0.6073846",
"0.60545695",
"0.6047062",
"0.6024261",
"0.6020555",
"0.6002816",
"0.59823495",
"0.5972657",
"0.59481657",
"0.59444326",
"0.5939445",
"0.5848792",
"0.5843851",
"0.5835104",
"0.5801529",
"0.57899004",
"0.5784669"
] | 0.66746724 | 0 |
x = list of x values y = list of y values Returns integral of y over x. Assumes full lists / ran post simulation | def trapezoidalPost(x,y):
integral = 0
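    # accumulate the area of each trapezoid: mean of adjacent y values times the x spacing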
for ndx in range(1,len(x)):
        integral += (y[ndx] + y[ndx-1]) / 2 * (x[ndx] - x[ndx-1])
return integral | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def integrate(x, y, xmin, xmax):\n indexes = get_interval(x, xmin, xmax)\n integral = np.trapz(y[indexes], x[indexes])\n\n return integral",
"def integral(requestContext, seriesList):\n results = []\n for series in seriesList:\n newValues = []\n current = 0.0\n for val in series:\n if val is None:\n newValues.append(None)\n else:\n current += val\n newValues.append(current)\n newName = \"integral(%s)\" % series.name\n newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)\n newSeries.pathExpression = newName\n results.append(newSeries)\n return results",
"def integral (self):\n dx = self.xbins[1] - self.xbins[0]\n dy = self.ybins[1] - self.ybins[0]\n return self.sum * (dx * dy)",
"def calc(x_list):\n\n y_list = [x**2 + 2*x + 1 for x in x_list]\n\n return y_list",
"def solve_integral(integrand, y):\n solnarr = np.empty(len(y))\n for i in range(len(y)):\n yy = y[i]\n soln = quad(integrand, 0, np.inf, args=(yy))\n solnarr[i] = soln[0]\n return solnarr",
"def integral( self ):\n integral = 0.0\n for i in range( 1, self.GetN() ):\n previousPoint = ( ROOT.Double(), ROOT.Double() )\n thisPoint = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i-1, previousPoint[0], previousPoint[1] )\n self.GetPoint( i, thisPoint[0], thisPoint[1] )\n \n integral += (thisPoint[0]-previousPoint[0]) * (thisPoint[1]+previousPoint[1])/2.0\n return integral",
"def integralFunction(xa, ya, xb, yb):\n return psi(xb, yb) - psi(xa, ya)",
"def piecewise_integrate(x, y, a, b):\n assert x[0] == a\n assert x[-1] <= b\n output = 0.\n num_x = len(x)\n if x[-1] == b:\n for idx in range(num_x - 1):\n output += y[idx] * (x[idx+1] - x[idx])\n else:\n for idx in range(num_x):\n if idx < num_x - 1:\n output += y[idx] * (x[idx+1] - x[idx])\n else:\n output += y[idx] * (b - x[idx])\n return output",
"def integrate(self, x0, t0, tend, N=100):\n h = np.double(tend-t0)/N\n t = np.zeros((N+1,1)); t[0]=t0\n x = x0.copy(); y = [x0.copy()]\n for i in xrange(N):\n g = self.rhs.Applyg(x) # evaluate vector g(x)\n A = lambda v: self.rhs.ApplyDf(x,v) # ----------- TODO: test this after implementing procedural A*x support\n x = self.matexp(A,x,h) + h*self.phi1(A,g,h)\n y.append(x)\n t[i+1] = t[i]+h\n return t,np.array(y)",
"def integrate(self, t):",
"def trapezoid_integral(f, xrange, intervals):\n \n a, b = min(xrange), max(xrange)\n delta_x = (b-a)/intervals\n x = np.arange(1, intervals)\n \n int_out = f(a)\n int_out += f(b)\n int_out += sum(2*f(a+x*delta_x))\n \n return delta_x/2*int_out",
"def integrate_range(self, lower, upper):\n if upper>self.upper:\n upper=self.upper\n if lower<self.lower:\n lower = self.lower\n\n i_l = int(np.floor((lower-self.lower)/self._dx))\n i_u = int(np.floor((upper-self.lower)/self._dx))\n #print \"i_l \",i_l,\" i_u \",i_u\n total = 0.0\n for i in range(i_l,i_u):\n total+= self.y[i]*self._dx\n return total",
"def sp_integrate_1D ( func , xmin , xmax , *args , **kwargs ) : \n from scipy import integrate\n ##\n result = integrate.quad ( func , xmin , xmax , *args , **kwargs )\n return result[0]",
"def integrate(u, j, h):\n y = u[j]\n return (h/2.)*((y[0]+y[-1]) + 2*sum(y[1:-1]))",
"def integral ( self, xmin , xmax , ymin , ymax , nevents = True ) :\n if self.xminmax() :\n xmn , xmx = self.xminmax()\n xmin = max ( xmin , xmn )\n xmax = min ( xmax , xmx )\n\n if self.yminmax() : \n ymn , ymx = self.yminmax() \n ymin = max ( ymin , ymn )\n ymax = min ( ymax , ymx )\n\n value , todo = 0 , True \n \n ## 1) make a try to use analytical integral (could be fast)\n if self.tricks :\n try:\n if hasattr ( self.pdf , 'setPars' ) : self.pdf.setPars() \n fun = self.pdf.function()\n value , todo = fun.integral ( xmin , xmax , ymin , ymax ) , False \n except:\n pass\n\n ## use numerical integration \n from ostap.math.integral import integral2 as _integral2\n\n extended = self.pdf.canBeExtended() or isinstance ( self.pdf , ROOT.RooAddPdf )\n\n if todo and extended : value = _integral2 ( self , xmin , xmax , ymin , ymax )\n elif todo :\n \n ## use unormalized PDF here to speed up the integration \n ifun = lambda x, y : self ( x , y , error = False , normalized = False )\n value = _integral2 ( ifun , xmin , xmax , ymin , ymax )\n norm = self.pdf.getNorm ( self.vars )\n value /= norm\n\n if nevents and self.pdf.mustBeExtended () :\n evts = self.pdf.expectedEvents( self.vars )\n if evts <= 0 or iszero ( evts ) :\n self.warning ( \"integral: expectedEvents is %s\" % evts )\n value *= evts \n\n return value",
"def integrate(self, *args, **kws):\n gap_handling = kws.get('gap_handling', 'error')\n wbin = Spectrum._groom_integration_ranges(*args)\n\n # check for gap overlap\n # sometimes small numerical errors cause trouble, so compare to size\n # of pixels\n if self.any_gap_overalp(wbin):\n if gap_handling == 'error':\n raise ValueError('Some of the integration ranges cover gaps '\n 'in the spectrum.')\n elif gap_handling == 'zero':\n pass\n else:\n raise ValueError('gap_handling parameter not recognized')\n\n wunit = wbin.unit\n Fs, Es = [], []\n for spec in self.spectra:\n wrange = spec.wbins[[0, -1]].to(wunit).value\n xbins = utils.rangeset_intersect(wbin.value, [wrange])\n if len(xbins) > 0:\n F, E = spec.integrate(xbins*wunit)\n else:\n Funit = spec.y.unit*wunit\n F = 0*Funit\n E = None if spec.e is None else 0*Funit\n Fs.append(F); Es.append(E)\n\n F = sum(Fs)\n if any(E is None for E in Es):\n E = None\n else:\n E = _np.sqrt(sum([E**2 for E in Es]))\n return F, E",
"def _evaluate(self, x, y):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n f = (1 - alpha) * self.xInterpolators[y_pos - 1](\n x\n ) + alpha * self.xInterpolators[y_pos](x)\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n f = np.zeros(m) + np.nan\n if y.size > 0:\n for i in range(1, self.y_n):\n c = y_pos == i\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n f[c] = (1 - alpha) * self.xInterpolators[i - 1](\n x[c]\n ) + alpha * self.xInterpolators[i](x[c])\n return f",
"def Integrar(Matriz, x, y, z):\n i = (integrate.simps(integrate.simps(integrate.simps(Matriz, x=z), x=y),x=x))\n return i",
"def rectangular_integral(f, xrange, intervals):\n int_out = 0\n delta_x = (max(xrange)-min(xrange))/intervals\n new_xrange = np.linspace(min(xrange), max(xrange), intervals)\n for x in new_xrange:\n int_out += f(x)\n return delta_x*int_out",
"def eval(self, x):\n self.__check_input__(x)\n return 418.9829 * self.dim - sum([y * np.sin(np.sqrt(abs(y))) for y in x])",
"def integral (self):\n return np.sum (self.values * (np.diff (self.bins)))",
"def sp_integrate_2D ( func ,\n xmin , xmax ,\n ymin , ymax , *args , **kwargs ) :\n from scipy import integrate\n ##\n result = integrate.dblquad ( func ,\n ymin ,\n ymax ,\n lambda x : xmin ,\n lambda x : xmax , \n *args , **kwargs )\n return result[0]",
"def CalcIntegral(self, Weights, Index):\n \n \n if self.ValueType=='single':\n valm=singleData(0.0)\n valv=singleData(0.0)\n elif self.ValueType == 'list':\n valm=listData(0.0,np.zeros(self.Results_ListLen,float))\n valv=listData(0.0,np.zeros(self.Results_ListLen,float))\n \n for pos,p in enumerate(Index):\n #print 'int', valm.val, self.Values[p].val\n valm += Weights[pos] * self.Values[p]\n valv += Weights[pos] * self.Values[p] * self.Values[p]\n \n return [valm,valv]",
"def compute_integrals(self):\n\n integrals = self.dat.data.reshape(-1, self.nvdofs).T.dot(self.cell_volumes)\n integrals_lift = Function(self._function_space)\n integrals_lift.dat.data[:] = np.tile(integrals,(self.nhdofs,))\n\n return integrals_lift",
"def lin_int(xs, ys):\n return scipy.interpolate.interp1d(xs, ys)",
"def integrate(self, residuals, y0, ydot0, t_eval, events=None):\n raise NotImplementedError",
"def integrateCartesian(self,xl,xu,yl,yu,**kwargs):\n import scipy.integrate as itg\n\n oldcoordsys = self.incoordsys\n try:\n self.incoordsys = 'cartesian'\n f = lambda y,x:self((x,y))\n\n res = itg.dblquad(f,xl,xu,lambda y:yl,lambda y:yu,**kwargs)\n finally:\n self.incoordsys = oldcoordsys\n\n self.lastintegrate = res\n return res[0]",
"def integrate(self, x, dx):\n raise NotImplementedError(\"Not implemented yet.\")",
"def parse_file_and_compute_integrals(file_path):\r\n\twith open(file_path,\"r\") as input_file:\r\n\r\n\t\t# intialize batch counter and \"current\" lists\r\n\t\tbatch_counter = 1\r\n\t\tz_current = [] # z_current is the set of z^2 values that constitute a single batch\r\n\t\tt_current = [] # t_current is the set of t values that constitute a single batch\r\n\t\tintegral_list = []\r\n\r\n\t\t# iterate through file\r\n\t\tfor line in input_file:\r\n\t\t\t\r\n\t\t\tline = line.strip()\r\n\t\t\tif line and line not in SKIP_LINE_LIST:\r\n\t\t\t\t\t# extract t,x,y,z from the line\r\n\t\t\t\t\tline_list = line.split(\" \")\r\n\t\t\t\t\tt = float(line_list[1])\r\n\t\t\t\t\tx = float(line_list[2])\r\n\t\t\t\t\ty = float(line_list[3])\r\n\t\t\t\t\tz = float(line_list[4])\r\n\r\n\t\t\t\t\tif t>FROM_SECOND:\r\n\t\t\t\t\t\tz_current.append(z*z)\r\n\t\t\t\t\t\tt_current.append(t)\r\n\r\n\t\t\t\t\tif t>(FROM_SECOND + batch_counter*BATCH_TIME_PERIOD_IN_SECONDS):\r\n\t\t\t\t\t\tintegral_value = np.trapz(z_current,t_current,axis=-1)\r\n\t\t\t\t\t\tintegral_list.append(integral_value)\r\n\t\t\t\t\t\tbatch_counter = batch_counter + 1\r\n\t\t\t\t\t\tz_current = []\r\n\t\t\t\t\t\tt_current = []\r\n\r\n\t\t\t\t\tif t>TO_SECOND:\r\n\t\t\t\t\t\tbreak\r\n\r\n\t\tintegral_value = np.trapz(z_current,t_current,axis=-1)\r\n\t\tintegral_list.append(integral_value)\r\n\r\n\t\treturn integral_list",
"def _evalPoly(self,a,x):\n y = a[0]\n for i in range(1,len(a)):\n y = self.F.Multiply(y, x)\n y = self.F.Add(y, a[i])\n return y"
] | [
"0.6894297",
"0.6820419",
"0.66159004",
"0.65011495",
"0.646472",
"0.63715506",
"0.636244",
"0.63209325",
"0.6220988",
"0.614587",
"0.6096252",
"0.60424936",
"0.59458",
"0.59325993",
"0.59302056",
"0.5927584",
"0.5856488",
"0.58477163",
"0.5845826",
"0.58155376",
"0.57960236",
"0.5786537",
"0.5785088",
"0.57801783",
"0.5778135",
"0.5776316",
"0.5772409",
"0.57408863",
"0.572258",
"0.57198346"
] | 0.6857058 | 1 |
Filter out url extracted locations with desired country | def locationFilter(locList, filterList, set_Country): # keep only locations inside the desired country that are not excluded by filterList
print('start filter location')
filteredLoc = []
for loc in locList:
print(loc[0])
if len(loc) > 0:
for l in loc[1]:
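                # address strings look like "city, region, country"; the third comma-separated field is the country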
country = l.split(',')[2]
if set_Country in country and not any(e in l for e in filterList):
print(l)
filteredLoc.append((loc[0], l))
return filteredLoc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def locFromText(set_Country, textList, filterList):\n loc = []\n print('Start extracting locations from texts')\n for t in textList:\n # print(row)\n text = t[1]\n if len(text) > 0:\n text = re.sub(r'[^\\w]', ' ', text) # remove symbol\n\n places = geograpy.get_place_context(text=text)\n addStr = places.address_strings\n for add in addStr:\n country = add.split(',')[2] # get country name from extracted address_strings\n # print(country)\n if set_Country in country and not any(e in add for e in filterList):\n # print('City:', add)\n loc.append((t[0], add))\n return loc",
"def test_get_country_by_geo_location(self):\n pass",
"def get_locations_by_country(df, country):\n locations = list(df[df.country == country].location.values)\n return locations",
"def target_extract(path, country, lat_col, lon_col, crs='EPSG:4326'):\n # Read input from path\n df = pd.read_table(path, sep=None, engine='python')\n\n # Create GeoDataFrame with geometry\n gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(\n df[lon_col], df[lat_col]), crs=crs)\n\n # Get and read the country boundaries\n world = gpd.read_file(shpreader.natural_earth(resolution='10m',\n category='cultural',\n name='admin_0_countries')\n )\n\n country_geom = world[world['ADMIN'] == country.capitalize()].geometry\n country_geom.crs = 'EPSG:4326'\n\n # Clip to records within country\n subset = gpd.clip(gdf, country_geom).reset_index(drop=True)\n # subset = gdf.cx[country_geom]\n\n return subset",
"def getCountry(soup):\n title_details = self.getAdditionalDetails(soup)\n pattern = r'country_of_origin.*?>(.*?)<'\n country = re.findall(pattern, str(title_details))\n return country",
"def get_all_country_urls(self, proxies=None):\n html_rsp = self._get_url_wrapper('https://socialblade.com/youtube/top/100', proxies=proxies)\n if not html_rsp:\n return False\n country_id_list = self._extract_country_ids(html_rsp)\n url_list = list()\n for country_id in country_id_list:\n url_list.append('https://socialblade.com/youtube/top/country/' + country_id)\n return url_list",
"def get_all_locations(self):",
"def _feature_country_process(self):\n if 'Country' not in self._df_invoice_line.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self._df_invoice_line.shape[0]\n \n df_invoice_line_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_line_new = df_invoice_line_new.append(\\\n self._df_invoice_line[self._df_invoice_line['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice_line = df_invoice_line_new\n del(df_invoice_line_new)\n \n rows_after = self._df_invoice_line.shape[0] \n _print_stat_rows(\"Countries filtering : \",rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice_line.columns \\\n if col not in 'Country']\n \n self._df_invoice_line = self._df_invoice_line[list_col_to_keep] \n\n return",
"def filter_city(input_city: str) -> str:\n # input_city = string.capwords(input_city.lower())\n result = filterString(input_city).cities\n return result",
"def filter_plants_by_region_id(region_id, year, host='localhost', area=0.5):\n\n state_dict = {\n 'Alabama':'AL',\n 'Alaska':'AK',\n 'Arizona':'AZ',\n 'Arkansas':'AR',\n 'California':'CA',\n 'Colorado':'CO',\n 'Connecticut':'CT',\n 'Delaware':'DE',\n 'Florida':'FL',\n 'Georgia':'GA',\n 'Hawaii':'HI',\n 'Idaho':'ID',\n 'Illinois':'IL',\n 'Indiana':'IN',\n 'Iowa':'IA',\n 'Kansas':'KS',\n 'Kentucky':'KY',\n 'Louisiana':'LA',\n 'Maine':'ME',\n 'Maryland':'MD',\n 'Massachusetts':'MA',\n 'Michigan':'MI',\n 'Minnesota':'MN',\n 'Mississippi':'MS',\n 'Missouri':'MO',\n 'Montana':'MT',\n 'Nebraska':'NE',\n 'Nevada':'NV',\n 'New Hampshire':'NH',\n 'New Jersey':'NJ',\n 'New Mexico':'NM',\n 'New York':'NY',\n 'North Carolina':'NC',\n 'North Dakota':'ND',\n 'Ohio':'OH',\n 'Oklahoma':'OK',\n 'Oregon':'OR',\n 'Pennsylvania':'PA',\n 'Rhode Island':'RI',\n 'South Carolina':'SC',\n 'South Dakota':'SD',\n 'Tennessee':'TN',\n 'Texas':'TX',\n 'Utah':'UT',\n 'Vermont':'VT',\n 'Virginia':'VA',\n 'Washington':'WA',\n 'West Virginia':'WV',\n 'Wisconsin':'WI',\n 'Wyoming':'WY'\n }\n\n print \"Getting region name from database...\"\n query = \"SELECT regionabr FROM ventyx_nerc_reg_region WHERE gid={}\".format(\n region_id)\n region_name = connect_to_db_and_run_query(query=query,\n database='switch_gis', host=host)['regionabr'][0]\n counties_path = os.path.join('other_data', '{}_counties.tab'.format(region_name))\n \n if not os.path.exists(counties_path):\n # assign county if (area)% or more of its area falls in the region\n query = \"SELECT name, state\\\n FROM ventyx_nerc_reg_region regions CROSS JOIN us_counties cts\\\n JOIN (SELECT DISTINCT state, state_fips FROM us_states) sts \\\n ON (sts.state_fips=cts.statefp) \\\n WHERE regions.gid={} AND\\\n ST_Area(ST_Intersection(cts.the_geom, regions.the_geom))/\\\n ST_Area(cts.the_geom)>={}\".format(region_id, area)\n print \"\\nGetting counties and states for the region from database...\"\n region_counties = pd.DataFrame(connect_to_db_and_run_query(query=query,\n database='switch_gis', host=host)).rename(columns={'name':'County','state':'State'})\n region_counties.replace(state_dict, inplace=True)\n region_counties.to_csv(counties_path, sep='\\t', index=False)\n else:\n print \"Reading counties from .tab file...\"\n region_counties = pd.read_csv(counties_path, sep='\\t', index_col=None)\n\n generators = pd.read_csv(\n os.path.join('processed_data','generation_projects_{}.tab'.format(year)), sep='\\t')\n generators.loc[:,'County'] = generators['County'].map(lambda c: str(c).title())\n\n print \"\\nRead in data for {} generators, of which:\".format(len(generators))\n print \"--{} are existing\".format(len(generators[generators['Operational Status']=='Operable']))\n print \"--{} are proposed\".format(len(generators[generators['Operational Status']=='Proposed']))\n\n generators_with_assigned_region = generators.loc[generators['Nerc Region'] == region_name]\n generators = generators[generators['Nerc Region'].isnull()]\n generators_without_assigned_region = pd.merge(generators, region_counties, how='inner', on=['County','State'])\n generators = pd.concat([\n generators_with_assigned_region,\n generators_without_assigned_region],\n axis=0)\n generators.replace(\n to_replace={'Energy Source':coal_codes, 'Energy Source 2':coal_codes,\n 'Energy Source 3':coal_codes}, value='COAL', inplace=True)\n generators_columns = list(generators.columns)\n\n existing_gens = generators[generators['Operational Status']=='Operable']\n proposed_gens = generators[generators['Operational 
Status']=='Proposed']\n\n print \"=======\"\n print \"Filtered to {} projects in the {} region, of which:\".format(\n len(generators), region_name)\n print \"--{} are existing with {:.0f} GW of capacity\".format(\n len(existing_gens), existing_gens['Nameplate Capacity (MW)'].sum()/1000.0)\n print \"--{} are proposed with {:.0f} GW of capacity\".format(\n len(proposed_gens), proposed_gens['Nameplate Capacity (MW)'].sum()/1000.0)\n print \"=======\"\n\n return generators",
"def _extract_country_ids(html_rsp):\n soup = BeautifulSoup(html_rsp, 'html.parser')\n country_id_list = list()\n for option in soup.find_all('option'):\n c_id = option.get('value')\n # Only country ids have len 2 on this website. Filter out non values to avoid errors.\n if c_id is not None and len(c_id) == 2 and c_id not in country_id_list:\n country_id_list.append(c_id)\n return country_id_list",
"def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry",
"def scrape_callback(url, html):\r\n fields = ('area', 'population', 'iso', 'country', 'capital',\r\n 'continent', 'tld', 'currency_code', 'currency_name',\r\n 'phone', 'postal_code_format', 'postal_code_regex',\r\n 'languages', 'neighbours')\r\n if re.search('/view/', url):\r\n tree = fromstring(html)\r\n all_rows = [\r\n tree.xpath('//tr[@id=\"places_%s__row\"]/td[@class=\"w2p_fw\"]' % field)[0].text_content()\r\n for field in fields]\r\n print(url, all_rows)",
"def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n return queryset.filter(data__qg_location__0__country__icontains=self.value())",
"def filter_external(self, queryset, name, value):\n\n if str2bool(value):\n return queryset.filter(location__external=True)\n else:\n return queryset.exclude(location__external=True)",
"def filter_region_graph(data, region):\r\n MetaDct = data[1]\r\n f_MetaDct = {}\r\n for idx in MetaDct:\r\n if idx != ',':\r\n if MetaDct[idx].region == region:\r\n f_MetaDct[idx] = MetaDct[idx].country\r\n return f_MetaDct",
"def process(source):\n file = urlopen(source, context=ssl_context)\n tree = ElementTree.parse(file)\n\n for item in tree.getroot():\n location = item.find(TAGS['LOCATION'])\n if location is not None:\n print(location.text)\n\n file.close()",
"def _filter_return_url(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n if url in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches",
"def country_flag(country):\n\tif not country:\n\t\treturn u''\n\tresult = Country.objects.filter(name__icontains=country)\n\tif result:\n\t\tc = result[0]\n\t\tiso = c.iso\n\t\tflag_location = iso_flag(iso)\n\t\treturn flag_location\n\treturn u''",
"def test_get_country_states(self):\n pass",
"def get_country(user, session, flag_pattern):\r\n page = \"https://www.fanfiction.net/u/\" + str(user)\r\n country = \"\"\r\n with closing(session.get(page, timeout=10.0, stream=True)) as r:\r\n lines = 0;\r\n for rline in r.iter_lines(chunk_size=10):\r\n lines += 1\r\n rstr = repr(rline)\r\n if rstr.find('Joined <sp') > 0:\r\n match = re.search(flag_pattern, rstr)\r\n if match:\r\n country = match.group(1)\r\n break\r\n if lines > 600:\r\n break\r\n return country",
"def country(alpha_2_code: str) -> None:",
"def extract_listing_location_from_result(soup, location):\r\n for div in soup.find_all(name='div', class_='pdate'):\r\n for city in div.find(name='span'):\r\n location.append(city)\r\n # print(locations)\r\n return location",
"def all_matched_searches(affiliations, de_facto_affiliations):\n geolocator = Nominatim()\n backup_geolocator = Google(\"AIzaSyCc3U_YDbluAh_Eja8Zc4e4PX04ndyDXgE\")\n iso_3166_1 = pd.read_csv(os.path.abspath(os.path.join(__file__, os.pardir,\n \"ISO_3166_1.csv\")), na_filter=False)\n iso_3166_2_us = pd.read_csv(os.path.abspath(os.path.join(__file__,\n os.pardir, \"ISO_3166_2_US.csv\")), na_filter=False)\n iso_dict = {**{country['Alpha-2 code']: [country[\n 'English short name (upper/lower case)'], country[\n 'Alpha-2 code'], country['Alpha-3 code']] for country in\n iso_3166_1.to_dict(orient='records')}, **{state['Code']: [\n state[\"Subdivision name\"], state['Code'], state['Code']] for\n state in iso_3166_2_us.to_dict(orient='records')}, 'unknown': [\n 'unknown'] * 3}\n countries = {**{country['Alpha-2 code']: country['Alpha-2 code'] for\n country in iso_3166_1.to_dict(orient='records')}, **{country[\n 'Alpha-3 code']: country['Alpha-2 code'] for country in\n iso_3166_1.to_dict(orient='records')}, **{country[\n 'English short name (upper/lower case)']: country[\n 'Alpha-2 code'] for country in iso_3166_1.to_dict(orient=\n 'records')}, **{state['Code']: state['Code'] for state in\n iso_3166_2_us.to_dict(orient='records')}, **{state[\n 'Subdivision name']: state['Code'] for state in\n iso_3166_2_us.to_dict(orient='records')}, 'unknown': 'unknown',\n '?': 'unknown', 'Taiwan': 'TW', \"PRC\": \"CN\", \"PR China\": \"CN\",\n \"UK\": \"GB\", \"United Kingdom\": \"GB\", \"Vietnam\": \"VN\",\n \"South Korea\": \"KR\", \"Macedonia\": \"MK\",\n \"Macedonia (FYROM)\": \"MK\", \"Iran (Islamic Republic of)\": \"IR\"}\n us = {'US', 'USA', 'United States', 'U.S.A', \"United States of America\"}\n us_states = {state['Subdivision name']: state['Code'] for state in\n iso_3166_2_us.to_dict(orient='records')}\n usa_states = dict()\n for state in us_states:\n usa_states[countries[state]] = countries[state]\n usa_states[countries[state][-2:]] = countries[state]\n if state not in countries:\n countries[state] = us_states[state]\n us_states = {**us_states, **usa_states}\n del usa_states\n country_count = {country: 0 for country in iso_dict}\n for k, v in affiliations.items():\n time.sleep(1)\n if \"country\" not in affiliations[k]:\n address_components = None\n while not address_components:\n time.sleep(1)\n try:\n address_components = [x.strip() for x in\n geolocator.reverse(k, language=\n 'en').address.split(',')]\n except GeocoderServiceError as g:\n try:\n address_components = list({com_g.strip() for com_g in [\n com_i for com_h in [com[0].split(\n ',') for com in\n backup_geolocator.reverse(k,\n language='en')] for com_i in com_h\n ]})\n except:\n print(colored(g, 'yellow'))\n next\n if bool([u for u in us if u in address_components]):\n local_states = [state for state in us_states if state in\n address_components]\n if bool(local_states):\n for state in local_states :\n affiliations[k][\"country\"] = us_states[state]\n country_count[affiliations[k][\"country\"]] = \\\n country_count[\n affiliations[\n k][\n \"country\"]\n ] + 1\n else:\n for country in countries:\n if \"country\" not in affiliations[k]:\n if country != 'United States of America' and country \\\n in address_components:\n affiliations[k][\"country\"] = countries[country]\n country_count[affiliations[k][\"country\"]] = \\\n country_count[\n affiliations[\n k][\n \"country\"]\n ] + 1\n if \"country\" not in affiliations[k]:\n country = input(colored(\"{}\\n{}? 
\".format(str(\n address_components), str(affiliations[k][\n \"affiliations\"])), 'magenta'))\n if len(country):\n affiliations[k][\"country\"] = countries[country]\n country_count[affiliations[k][\"country\"]] = country_count[\n affiliations[\n k][\"country\"]]\\\n + 1\n if \"country\" in affiliations[k]:\n print(\"{}: {}\".format(iso_dict[affiliations[k][\"country\"]][0], str(\n address_components)))\n save_heatmap_data(affiliations)\n return(affiliations, country_count)",
"def test_single_word_exeter(self):\n result = location.lookup_location('Exeter GB')\n\n self.assertEqual(result['country'], 'GB')",
"def country(name):\n return location_db().find(name=name)[\"country\"]",
"def test_get_countries(self):\n pass",
"def _extract_locs_ip_entities(ip_entities: Iterable[IpAddress]):\n if isinstance(ip_entities[0], list): # type: ignore\n return [\n ip[0][\"Location\"] # type: ignore\n for ip in ip_entities\n if bool(ip[0].Location) # type: ignore\n ]\n return [ip[\"Location\"] for ip in ip_entities if bool(ip.Location)]",
"def get_countriesdata(url, downloader, with_world=True):\n headers, iterator = downloader.get_tabular_rows(url, dict_form=True)\n countriesdata = dict()\n for row in iterator:\n countryiso3 = row[\"REF_AREA\"]\n countriesdata[countryiso3] = countriesdata.get(countryiso3, []) + [row]\n if with_world:\n countriesdata[WORLD] = countriesdata.get(WORLD, []) + [row]\n\n return countriesdata, headers",
"def clean_location(df):\n \n local = df['location'].astype(str)\n \n #geocoders read X St at Y St better than X & Y or X/Y\n local = local.str.replace(\"&\", \"at\")\n local = local.str.replace(\"/\", \"at\")\n \n #OpenAddress dataset has addresses in title case\n local = local.str.title()\n\n return df.assign(location=local.values)"
] | [
"0.64201874",
"0.61067545",
"0.57039845",
"0.56817716",
"0.561799",
"0.5469821",
"0.5450577",
"0.5419892",
"0.53635603",
"0.53333396",
"0.5332225",
"0.5315806",
"0.530927",
"0.53071046",
"0.5266682",
"0.5266216",
"0.5244607",
"0.52394813",
"0.5220792",
"0.52131206",
"0.5207788",
"0.519899",
"0.5193696",
"0.51761544",
"0.51682925",
"0.5153736",
"0.51415896",
"0.51384467",
"0.5127955",
"0.51238626"
] | 0.66518736 | 0 |
Geocoding, assign road name to place name | def placeToRoad(placeName):
# sleep(2)
g = gmaps.geocode(placeName)
roadNo, roadName = '', ''
zipCode, coor_Lat, coor_Lng = None, None, None
if len(g) > 0:
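        # walk the address components of the top geocoding result for street number, route and postal code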
for ac in g[0]['address_components']:
if ac['types'] and len(ac['types']) > 0:
if ac['types'][0] == 'street_number':
try:
roadNo = ac['long_name']
except:
roadNo = ''
if ac['types'][0] == 'route':
try:
roadName = ac['long_name']
except:
roadName = ''
if ac['types'][0] == 'postal_code':
try:
zipCode = ac['long_name']
except:
zipCode = None
# if 'long_name' in g[0]['address_components'][0].keys(): # road no.
# if g[0]['address_components'][0]['types'][0] == 'street_number':
# try:
# roadNo = g[0]['address_components'][0]['long_name']
# except:
# roadNo = ''
#
# if 'long_name' in g[0]['address_components'][1].keys(): # road name
# if g[0]['address_components'][1]['types'][0] == 'route':
# try:
# roadName = g[0]['address_components'][1]['long_name']
# except:
# roadName = ''
#
# if 'long_name' in g[0]['address_components'][-1].keys(): # zip code
# if g[0]['address_components'][-1]['types'][0] == 'postal_code':
# try:
# zipCode = g[0]['address_components'][-1]['long_name']
# except:
# zipCode = None
if 'location' in g[0]['geometry'].keys():
try:
coor = g[0]['geometry']['location'] # APPROXIMATE location
coor_Lat = coor['lat']
coor_Lng = coor['lng']
except:
coor_Lat, coor_Lng = None, None
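    # combine street number and route into a single road-name string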
roadName = roadNo + ' ' + roadName
coor = (coor_Lat, coor_Lng)
return roadName, zipCode, coor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2",
"def geocode(self, resource):\n # Turn the different address components into a formatted string\n search_address = \", \".join(a for a in [resource.street,\n resource.city, resource.state,\n resource.zipcode, resource.country]\n if a is not None and not\n a.isspace())\n\n # Make sure we generated something meaningful\n if search_address and search_address is not None:\n # Now query the geocoder with this formatted string\n geolocator = GoogleV3(api_key=self.api_key)\n address, (latitude, longitude) = geolocator.geocode(search_address)\n\n # Update the resource based on the returned geopy.location.Location\n if address and not address.isspace():\n resource.fulladdress = address\n\n if latitude and longitude:\n resource.latitude = latitude\n resource.longitude = longitude\n\n # FUTURE: Perform additional normalization operations based\n # on the information in Location.raw\n pass",
"def geocode(self, geocoder):\n for term in self.terms:\n # No need to geocode regions\n if not term.get('region'):\n geo = geocoder.geocode(term['string'])\n if geo:\n term['geo'] = geo\n if not self.region:\n # TODO: descobrir regiao do ponto\n self.region = \"???\"\n else:\n self.region = term['region']",
"def place_by_name(place, API_KEY=API_KEY, FIND_PLACE_API_URL=FIND_PLACE_API_URL):\n params = {\n 'input': '{}'.format(place),\n 'fields':'name,geometry,formatted_address',\n 'inputtype':'textquery',\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(FIND_PLACE_API_URL, params=params)\n\n response = response.json()['candidates'][0]\n\n geodata = dict()\n geodata['lat'] = response['geometry']['location']['lat']\n geodata['lng'] = response['geometry']['location']['lng']\n geodata['address'] = response['formatted_address']\n\n return geodata",
"def roadToCoor(rn):\n # sleep(2)\n g = gmaps.geocode(rn)\n\n zipCode = None\n coor_Lat, coor_Lng, bbox_NE_Lat, bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng = None, None, None, None, None, None\n if len(g) > 0:\n if len(g) > 0:\n for ac in g[0]['address_components']:\n try:\n if ac['types'][0] == 'postal_code':\n zipCode = ac['long_name']\n except:\n zipCode = None\n\n if 'location' in g[0]['geometry'].keys():\n try:\n coor = g[0]['geometry']['location'] # APPROXIMATE location\n coor_Lat = coor['lat']\n coor_Lng = coor['lng']\n except:\n coor_Lat, coor_Lng = None, None\n\n if 'bounds' in g[0]['geometry'].keys(): # bounding box\n try:\n bbox = g[0]['geometry']['bounds']\n bbox_NE_Lat = bbox['northeast']['lat']\n bbox_NE_Lng = bbox['northeast']['lng']\n bbox_SW_Lat = bbox['southwest']['lat']\n bbox_SW_Lng = bbox['southwest']['lng']\n except:\n bbox_NE_Lat, bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng = None, None, None, None\n\n # g = geocoder.google(loc)\n # print(loc, g.latlng)\n coors = (coor_Lat, coor_Lng, bbox_NE_Lat, bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng)\n return zipCode, coors",
"def update_street_name(name, mapping):\r\n m = street_type_re.search(name)\r\n if m:\r\n street_type = m.group()\r\n if street_type in list(mapping.keys()):\r\n better_street_type = mapping[street_type]\r\n name = street_type_re.sub(better_street_type, name)\r\n return name",
"def geolocate_address(self):\n self.geolocator = Nominatim(user_agent=\"fundaft\")\n\n # If latitude / longitude are missing, try to geocode them on the basis\n # of the address \n self.coords = [self.get_coords(address) if np.isnan(lat)\n else (lat, lon) for address, lat, lon in\n zip(self.df_ads['property_title'], \n self.df_ads['latitude'], \n self.df_ads['longitude'])]\n \n df = pd.DataFrame(self.coords, columns=['latitude', 'longitude'])\n \n # If new coordinates are not in Dublin, change to na again\n df = self.is_in_dublin(df)\n\n self.df_ads[[\"latitude\",\"longitude\"]] = df",
"def _geocode(self, phn, street, borough_code=None, zip=None):\n try:\n r = self._g[self.geofunction](house_number=phn, street=street, borough_code=borough_code, zip=zip)\n self.results.append(r)\n except GeosupportError as ge:\n if 'SIMILAR NAMES' in ge.result[\"Message\"]:\n list_of_street_names = ge.result['List of Street Names']\n r = [{\n 'street': s,\n 'borough_code': borough_code\n } for s in list_of_street_names]\n self.similiar_names.extend(r)",
"def displayName(self):\r\n return self.tr(\"PDOK Reverse Geocoder\")",
"def addressToName(self, address):\n pass",
"def update_name(name, mapping):\n m = street_type_re.search(name)\n if m:\n street_type = m.group()\n for key, value in mapping.iteritems():\n if street_type == key:\n name = name.replace(key,value)\n\n return name",
"def _process_place_info(self):\n places = self._plrevgeoloc.mapItem.sortedPlaceInfos\n\n # build a dictionary where key is placetype\n places_dict = {}\n for p in places:\n try:\n places_dict[p.placeType].append((p.name, p.area))\n except KeyError:\n places_dict[p.placeType] = [(p.name, p.area)]\n\n # build list to populate PlaceNames tuple\n place_info = []\n for field in range(18):\n try:\n # add the place names sorted by area (ascending)\n place_info.append(\n [\n p[0]\n for p in sorted(places_dict[field], key=lambda place: place[1])\n ]\n )\n except:\n place_info.append([])\n\n # fill in body_of_water for compatibility with Photos <= 4\n place_info.append(place_info[7] + place_info[9])\n\n place_names = PlaceNames(*place_info)\n self._names = place_names\n\n # build the name as it appears in Photos\n # the length of the name is variable and appears to be based on available\n # reverse geolocation data in the following order (left to right, joined by ',')\n # 8: area_of_interest\n # 11: region (I've only seen this applied to islands)\n # 4: locality / city\n # 2: administrative area (state/province)\n # 1: country\n # 9: inland_water\n # 7: ocean\n name = \", \".join(\n [\n p[0]\n for p in [\n place_names[8], # area of interest\n place_names[11], # region (I've only seen this applied to islands)\n place_names[4], # locality / city\n place_names[2], # administrative area (state/province)\n place_names[1], # country\n place_names[9], # inland_water\n place_names[7], # ocean\n ]\n if p and p[0]\n ]\n )\n self._name = name if name != \"\" else None",
"def address():\n # We start with generating the street name. For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full",
"def parse_streetname(self):\n index = self.index\n \n name = \"\"\n for i in range(4):\n if index + i == self.length:\n break\n if self.words[index+i]['word'] == ',':\n break\n # Hack\n if self.words[index+i]['word'] == 'doctor':\n self.words[index+i]['word'] = 'drive'\n break\n try:\n word = sttype[self.words[index+i]['word']]\n break\n except:\n try:\n word = vocab[self.words[index+i]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n break\n if name != '':\n name += ' ' + word['lemma'][0]\n else:\n name = word['lemma'][0]\n except: \n if self.words[index+i]['word'][-2:] in [ 'th', 'st', 'nd', 'rd' ]:\n name = self.words[index+i]['word'][:-2]\n else:\n self.index += i\n _dir, _n = self.parse_streetdir()\n self.index -= i\n if _dir:\n break\n if name != '':\n name += ' ' + self.words[index+i]['word']\n else:\n name = self.words[index+i]['word']\n \n if i == 0 or i == 4:\n return None, 0\n else:\n return name, i",
"def makeAddressToGeocodeRequest(address):\n global headersGlobal, URL_addressToGeocode # get global variables\n\n key = variables.bingMapsAPIKey # api key\n\n # construct the url\n url = URL_addressToGeocode + str(address[0]) + \"/\" + str(address[1]) + \"/\" + str(address[2]) + \"/\" + str(\n address[3]) + \"/\" + str(address[4]) + \"?key=\" + key\n\n request = requests.get(url, headers=headersGlobal) # make the request\n return request # return the request",
"def update_city_name(name):\r\n if ', WA' or ',WA' in name:\r\n name = name.rstrip (', WA')\r\n return string.capwords(name)",
"def _process_place_info(self):\n places = self._place_names\n\n # build a dictionary where key is placetype\n places_dict = {}\n for p in places:\n # places in format:\n # [(5, \"St James's Park\", 45, 0), ]\n # 0: modelID\n # 1: name\n # 2: type\n # 3: area\n try:\n places_dict[p[2]].append((normalize_unicode(p[1]), p[3]))\n except KeyError:\n places_dict[p[2]] = [(normalize_unicode(p[1]), p[3])]\n\n # build list to populate PlaceNames tuple\n # initialize with empty lists for each field in PlaceNames\n place_info = [[]] * 19\n\n # add the place names sorted by area (ascending)\n # in Photos <=4, possible place type values are:\n # 45: areasOfInterest (The relevant areas of interest associated with the placemark.)\n # 44: body of water (includes both inlandWater and ocean)\n # 43: subLocality (Additional city-level information for the placemark.\n # 16: locality (The city associated with the placemark.)\n # 4: subAdministrativeArea (Additional administrative area information for the placemark.)\n # 2: administrativeArea (The state or province associated with the placemark.)\n # 1: country\n # mapping = mapping from PlaceNames to field in places_dict\n # PlaceNames fields map to the placeType value in Photos5 (0..17)\n # but place type in Photos <=4 has different values\n # hence (3, 4) means PlaceNames[3] = places_dict[4] (sub_administrative_area)\n mapping = [(1, 1), (2, 2), (3, 4), (4, 16), (18, 44), (8, 45)]\n for field5, field4 in mapping:\n try:\n place_info[field5] = [\n p[0]\n for p in sorted(places_dict[field4], key=lambda place: place[1])\n ]\n except KeyError:\n pass\n\n place_names = PlaceNames(*place_info)\n self._names = place_names\n\n # build the name as it appears in Photos\n # the length of the name is at most 3 fields and appears to be based on available\n # reverse geolocation data in the following order (left to right, joined by ',')\n # always has country if available then either area of interest and city OR\n # city and state\n # e.g. 4, 2, 1 OR 8, 4, 1\n # 8 (45): area_of_interest\n # 4 (16): locality / city\n # 2 (2): administrative area (state/province)\n # 1 (1): country\n name_list = []\n if place_names[8]:\n name_list.append(place_names[8][0])\n if place_names[4]:\n name_list.append(place_names[4][0])\n elif place_names[4]:\n name_list.append(place_names[4][0])\n if place_names[2]:\n name_list.append(place_names[2][0])\n elif place_names[2]:\n name_list.append(place_names[2][0])\n\n # add country\n if place_names[1]:\n name_list.append(place_names[1][0])\n\n name = \", \".join(name_list)\n self._name = name if name != \"\" else None",
"def parse_address_from_geocoding_response(geocoded_data: dict) -> str:\n return geocoded_data[\n 'response'][\n 'GeoObjectCollection'][\n 'featureMember'][0][\n 'GeoObject'][\n 'metaDataProperty'][\n 'GeocoderMetaData'][\n 'text']",
"def name(self):\r\n return \"pdok-reverse-geocoder\"",
"def reverse_geocoding(lat, lng, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n params = {\n 'latlng': '{},{}'.format(lat, lng),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n geodata = parse_response(response)\n return geodata",
"def geocoding(address, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n # define the parameters of the search\n params = {\n 'address': '{}'.format(address),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n\n geodata = parse_response(response)\n return geodata",
"def make_lad_lookup(geo_var_name=\"LAD20\"):\n name_lu = pd.read_csv(\n os.path.join(meta_location, \"LA_UA names and codes UK as at 04_20.csv\")\n )\n name_dict = name_lu.set_index(f\"{geo_var_name}CD\")[f\"{geo_var_name}NM\"].to_dict()\n return name_dict",
"def geocode(df, col):\r\n pass",
"def convert_names(self):\n names_map = {\n \"AL\": \"Alabama\",\n \"CT\": \"Conn\",\n \"CA\": \"Cali\",\n \"CO\": \"Colo\",\n \"DC\": \"District of Columbia\"\n }\n self[\"state_name\"] = self[\"abbrev\"].map(names_map)",
"def geocode(addr_str):\n\n\tbase_url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n\turl_template = '{0}?token={1}&input={2}&form=json'\n\turl = url_template.format(base_url, token, addr_str)\n\tresponse = requests.get(url)\n\n\tif response.status_code != 200:\n\t\tprint 'unable to establish connection with rlis api'\n\t\tprint 'status code is: {0}'.format(response.status_code)\n\t\treturn response.status_code\n\t\n\tjson_rsp = response.json()\n\tif json_rsp['error']:\n\t\tprint 'the following address could not be geocoded:'\n\t\tprint '\\'{0}\\''.format(addr_str)\n\t\tprint 'the following error message was returned:'\n\t\tprint '\\'{0}\\''.format(json_rsp['error']), '\\n'\n\telse:\n\t\treturn json_rsp['data'][0]",
"def geocode(address):\n geo_data = requests.get(\"https://geocode.xyz/{}?json=1\".format(\n urllib.parse.quote_plus(address)))\n geo_json = json.loads(geo_data.content)\n\n return geo_json['standard']['city'], geo_json['latt'], geo_json['longt']",
"def get_place_details(self):\n self.google_api_url = 'https://maps.googleapis.com/maps/api/place/details/json?placeid={}&key={}'.format(self.place_id, api_key)\n self.r = requests.get(url=self.google_api_url)\n self.data = self.r.json()\n self.address_components = self.data['result']['address_components']\n\n for i in self.address_components:\n if i['types'][0] == 'locality':\n self.city = (i['long_name'])\n return (self.city)\n else:\n pass",
"def net_xy(street):\r\n\r\n # api-endpoint\r\n URL = \"https://ags.govmap.gov.il/Search/FreeSearch\"\r\n # headers\r\n headers = {\"Content-Type\": \"application/json\", \"charset\": \"utf-8\"}\r\n # location given here\r\n try:\r\n p = \"{\\\"keyword\\\": \\\"\" + street + \"\\\",\\\"LstResult\\\": null}\"\r\n PARAMS = p.encode(\"utf-8\")\r\n\r\n # sending get request and saving the response as response object\r\n r = requests.post(url=URL, data=PARAMS, headers=headers)\r\n\r\n # extracting data in json format\r\n data = r.json()\r\n\r\n # extracting latitude, longitude and formatted address\r\n # of the first matching location\r\n\r\n X = data['data']['Result'][0]['X']\r\n Y = data['data']['Result'][0]['Y']\r\n except Exception as e:\r\n print(e)\r\n # print('exception ddamammnnnnn')\r\n print(street)\r\n return 0,0\r\n return X,Y",
"def name_places(self):\n self.city_names = {}\n self.region_names = {}\n for city in self.cities:\n self.city_names[city] = self.lang.name(\"city\")\n for region in np.unique(self.territories):\n self.region_names[region] = self.lang.name(\"region\")",
"def normalizer(self, place, includeZeroPopulation=False, casing=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'locationNormalizer')\r\n\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json"
] | [
"0.6341097",
"0.6231902",
"0.6192851",
"0.61891156",
"0.6175963",
"0.6076743",
"0.6004888",
"0.5973303",
"0.594348",
"0.59281343",
"0.5834192",
"0.5829032",
"0.57439303",
"0.5721165",
"0.5709383",
"0.5661697",
"0.5651798",
"0.56299216",
"0.56124854",
"0.5551279",
"0.5521964",
"0.55146456",
"0.55131257",
"0.5512303",
"0.55075747",
"0.54777735",
"0.54752356",
"0.5450722",
"0.5450669",
"0.5425556"
] | 0.7276688 | 0 |
Raises Brightness of the lights | def high_bri(self):
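        # raise each light's brightness by 50, clamping at the 255 ceiling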
for light in self.lights:
bri = self.b.get_light(light,'bri')
bri = bri + 50
if bri > 255:
bri = 255
self.b.set_light(light,'bri',bri) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def change_brightness(image, value):\n\n return change_light(image, value, \"v\")",
"def set_brightness(self, value):\n self.parent.backlight.set_brightness(value)",
"def _update_brightness(self):\n while self.current_brightness != self.brightness:\n next_color = RGB(r=int(self.color.r * (self.current_brightness/100.0)),\n g=int(self.color.g * (self.current_brightness/100.0)),\n b=int(self.color.b * (self.current_brightness/100.0)))\n self._update_color(next_color)\n diff = self.brightness - self.current_brightness\n # adjust current brightness to +/- 1\n self.current_brightness = self.current_brightness + \\\n (diff) / abs(diff)\n time.sleep(.05)\n # Final update to exact brightness and default if no change in brightness setting\n final_color = RGB(r=int(self.color.r * (self.brightness/100.0)),\n g=int(self.color.g * (self.brightness/100.0)),\n b=int(self.color.b * (self.brightness/100.0)))\n self._update_color(final_color)",
"def update(self):\n self._brightness = self._lj.get_load_level(self._index) / 99 * 255",
"def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0",
"def bright(self,l):\n if 1 <= l <= 4:\n self.send(\"\\x1f\\x58%c\" % l)\n else:\n raise ValueError('brightness values have to be between 1 and 4')",
"def set_amb_bright(self, level: int):\n return self.send(\"set_amb_bright\", [level])",
"def test_change_brightness_back_to_10():",
"def light_standby():\n for led in leds:\n led.on()\n\n rgb_driver.pulse(on_color=(scale[\"R\"], scale[\"G\"], scale[\"B\"]), off_color=(0,0,0))",
"def brightness(self, factor):\n\n channels = [\"r\", \"g\", \"b\"]\n total_lumes = clamp(self.get_luminance() + (255.0 * factor) - 255.0, 0.0, 255.0)\n\n if total_lumes == 255.0:\n # white\n self.r, self.g, self.b = 0xFF, 0xFF, 0xFF\n elif total_lumes == 0.0:\n # black\n self.r, self.g, self.b = 0x00, 0x00, 0x00\n else:\n # Adjust Brightness\n pts = (total_lumes - 0.299 * self.r - 0.587 * self.g - 0.114 * self.b)\n slots = set(channels)\n components = [float(self.r) + pts, float(self.g) + pts, float(self.b) + pts]\n count = 0\n for c in channels:\n overage, components[count] = self._get_overage(components[count])\n if overage:\n slots.remove(c)\n components = list(self._distribute_overage(components, overage, slots))\n count += 1\n\n self.r = clamp(round_int(components[0]), 0, 255) & 0xFF\n self.g = clamp(round_int(components[1]), 0, 255) & 0xFF\n self.b = clamp(round_int(components[2]), 0, 255) & 0xFF",
"def set_brightness(self, level):\n print(\"Got request to set brightness with level: %s\" % (level))\n # Home assistant sets brightness on a scale of 0 to 255\n if level > 0 and level < 255:\n new_level = level / 255\n print(\"Setting brightness to %s\" % (new_level))\n self.turn_on(r=self.r, g=self.g, b=self.b, brightness=new_level)\n self.client.publish(BRIGHTNESS_STATE_TOPIC, level) #publish",
"def getSurfaceBrightness(self):\n return self._sbrightn",
"def setUIBrightness(self, value):\n\n\t\t# print(value)\n\t\tself.col['window'] = QtGui.QColor(value, value, value)\n\t\tself.computeUIPalette()\n\t\tself.loadStyleSheet()",
"def test_set_and_get_led_brightness_level(self):",
"def lower_bri(self):\r\n for light in self.lights:\r\n bri = self.b.get_light(light,'bri')\r\n bri = bri - 50\r\n if bri < 0:\r\n bri = 1\r\n self.b.set_light(light,'bri',bri)",
"def set_brightness(distance):\n if math.floor(distance / 100) - 1 >= 0 and math.floor(distance / 100) - 1 <= 9:\n return 9 - (math.floor(distance / 100) - 1)\n elif math.floor(distance / 100) - 1 >= 0:\n return 1\n else:\n return 9",
"def get_brightness(self) -> int:\r\n if not self.backlight:\r\n return -1\r\n\r\n return self.backlight.brightness",
"def flicker_lights(self):\n print 'Lights Set'",
"def setBrightness(self, brightness):\n self._logger.debug(\"setBrightness\")",
"def set_brightness(self, brightness: int):\r\n if not self.backlight:\r\n return\r\n\r\n if brightness < 0 or brightness > 100:\r\n # Print an error, probably\r\n return\r\n\r\n self.backlight.brightness = brightness",
"def turn_on(self, **kwargs: Any) -> None:\n self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n self._light.turn_on()",
"def min_brightness(self):\n return .0",
"def lightness(self):\n min_component = min(self.red, self.green, self.blue)\n max_component = max(self.red, self.green, self.blue)\n avg = (max_component + min_component) / 2\n light = avg / 255\n return light",
"def set_color_brightness(color, brightness):\n if brightness == 0:\n put_light_state(False, 0, 0, 16000)\n else:\n put_light_state(\n True,\n 254 * color // 100,\n 254 * brightness // 100,\n 16000\n )",
"def adjust_brightness(image, delta):\r\n return _clip(image + delta * 255)",
"def the_user_changes_the_brightness_to(n):\n print(\"Changing brightness to \"+ str(n) +\"...\")\n web_app.change_property_softassert(\"brightness\",n)",
"def increase_brightness(image, value=18):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv2.merge((h, s, v))\n image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return image",
"def light(brightness, filter):\n brightness = clamp(MIN_BRIGHTNESS, round(brightness), MAX_BRIGHTNESS)\n for col in range(DISPLAY_WIDTH):\n for row in range(DISPLAY_HEIGHT):\n if filter(col, row):\n microbit.display.set_pixel(col, row, brightness)",
"def brightness(self):\n return self.get_value('bri')",
"def config_brightness(self):\n orig_brightness, prev_brightness = self.brightness, self.brightness\n self.make_ui_group(False, 'Brightness:', self.brightness)\n\n while True:\n action_left, action_right = (self.button_left.action(),\n self.button_right.action())\n if action_left is RichButton.HOLD:\n return self.brightness is not orig_brightness, False # Config\n if action_right is RichButton.HOLD:\n return self.brightness is not orig_brightness, True # Paint\n if action_left is RichButton.TAP:\n self.brightness = max(0.0, self.brightness - 0.1)\n elif action_right is RichButton.TAP:\n self.brightness = min(1.0, self.brightness + 0.1)\n\n if self.brightness is not prev_brightness:\n self.rect.x = int(board.DISPLAY.width * (self.brightness - 1.0))\n prev_brightness = self.brightness"
] | [
"0.71973896",
"0.705513",
"0.70402426",
"0.70310324",
"0.701834",
"0.69162804",
"0.6844577",
"0.6786708",
"0.6758496",
"0.67494047",
"0.6733191",
"0.6731852",
"0.67068326",
"0.6687837",
"0.66576874",
"0.664437",
"0.6625247",
"0.66169715",
"0.66129184",
"0.6609615",
"0.66095114",
"0.65928346",
"0.65373534",
"0.65314025",
"0.6529507",
"0.6504484",
"0.650306",
"0.6500904",
"0.6454986",
"0.6452204"
] | 0.72851527 | 0 |
sets the output channel(s) | async def setoutput(self, ctx, chan: discord.Channel):
server = ctx.message.server
if server.id not in self.settings:
self.initial_config(server.id)
if server != chan.server:
return await self.bot.say("Stop trying to break this")
if chan.type != discord.ChannelType.text:
return await self.bot.say("That isn't a text channel")
if chan.id in self.settings[server.id]['output']:
return await self.bot.say("Channel already set as output")
if self.settings[server.id]['multiout']:
self.settings[server.id]['output'].append(chan.id)
self.save_json()
return await self.bot.say("Channel added to output list")
else:
self.settings[server.id]['output'] = [chan.id]
self.save_json()
return await self.bot.say("Channel set as output") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output_channels(self, input_channels):\n pass",
"def setDescriptorChannels(self, dch): # real signature unknown; restored from __doc__\n pass",
"def setup_channels():\n\n # Setup channel encoders\n for c in channels:\n channels[c].setup()\n print()",
"def setOutput(self):\n self.stopListening()\n\n gpio.setup(self.bcm_id, gpio.OUT)\n self.mode = gpio.OUT",
"def PYDSO010SETCHAN(self):\n ctx = self.item_start() # always first line of test\n\n chan = ctx.item.chan\n if not (0 < chan < 5):\n self.logger.error(\"Invalid channel number: {} (1-4 accepted)\".format(chan))\n self.item_end(ResultAPI.RECORD_RESULT_INTERNAL_ERROR)\n return\n\n self.shared_lock(self.DSO).acquire()\n\n # reset the scope to a known state\n self.dso.write('*RST')\n if chan != 1: # after reset, chan 1 is already on\n self.dso.write(':CHANnel1:DISPlay OFF') # turn off channel 1\n self.dso.write(':CHANnel{}:DISPlay ON'.format(chan)) # turn off channel 1\n\n self.dso.write(':CHANnel{}:SCALe 100mV'.format(chan))\n\n vpp = self.dso.query(':MEASure:VPP? CHANnel{}'.format(chan))\n value = float(vpp)\n _result, _bullet = ctx.record.measurement(\"VPP{}\".format(chan), value, ResultAPI.UNIT_VOLTS)\n\n self.log_bullet(\"Switched to channel {}\".format(chan))\n self.log_bullet(_bullet)\n time.sleep(0.1) # give it some time to sit here, else its too fast\n self.shared_lock(self.DSO).release()\n self.item_end() # always last line of test",
"def output_channels(self, input_channels):\n return input_channels",
"def setup(self, channels):\n self.channels = channels[:]",
"def ChanOutState(state=\"off\", channel=1, inverted=0):\r\n if inverted == 0:\r\n inst.write(\"OUTP%i %s\" %(channel,state)) \r\n elif inverted == 1:\r\n inst.write(\"OUTP%i:COMP %s\" %(channel,state))",
"def __set_outputs__(self):\n self.__set_in_out_var__(None, 1)",
"def _set_channel_(self, channel):\n self._channel = channel",
"def setChannelOutput(self, Channel, Output, stringOnly=0):\n if Output:\n msg = \"OUTPut\"+str(Channel)+\":STATe ON\"\n else:\n msg = \"OUTPut\"+str(Channel)+\":STATe OFF\"\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg",
"def output(self, channel, value):\n self._check_mode()\n if channel in self.channels:\n print(f\"output fuer channel {channel} auf {value} gesetzt\")\n else:\n raise RuntimeError(\"The GPIO channel has not been set up as an OUTPUT\")",
"def setOutput(self, Output, stringOnly=0):\n self.setChannelOutput(1, Output, stringOnly)\n self.setChannelOutput(2, Output, stringOnly)\n self.setChannelOutput(3, Output, stringOnly)\n self.setChannelOutput(4, Output, stringOnly)",
"def set_output(self, channels, value, var, io_update=False):\n\n # Activate selected channels\n self._set_channels(channels)\n\n # Turn off linear sweep\n cfr_bytes = self._read('CFR')\n cfr_bytes[0] = 0\n cfr_bytes[1] &= 0x03 # sets everything except for the last 2 bits to 0\n self._write('CFR', cfr_bytes)\n\n if var == 'frequency':\n register = 'CFTW0' # Write FTW to CFTW0 register\n value = int(np.round(value / 1e3) * 1e3) # round frequency to 1 kHz.\n data = self._convert_frequency(value)\n\n elif var == 'phase':\n register = 'CPOW0' # write POW bytes into correct register\n data = self._convert_phase(value)\n\n elif var == 'amplitude':\n register = 'ACR' # Write to 'ACR' register\n data = self._convert_amplitude(value)\n\n self._write(register, data)\n self._update(var, channels, value)\n\n if io_update:\n self._io_update()",
"def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass",
"def selectChannel(self,asic,chan, hsmode= 1 ):\n pass",
"def set_channel(self, channel):\n self.l1.setText(\"Channel: \" + str(channel))",
"def set_output(self, on=False):\r\n self.on = on",
"def set_channel(self, c, channel):\n try:\n self.binding.set_switcher_channel(channel)\n except Exception, e:\n self.handle_wavemeter_error(e)\n return False\n\n return True",
"def set_channel(cls, channel):\n cls.channel = channel",
"def set_specific_channel(channel_number):\n global interface\n\n print(\"Set channel to {} on interface {}\".format(channel_number, interface))\n system(f\"iwconfig {interface} channel {channel_number}\")",
"def change_channel():\n global interface\n\n print(\"Change channels for interface {}\".format(interface))\n channel_number = 1\n\n while True:\n system(f\"iwconfig {interface} channel {channel_number}\")\n channel_number = channel_number % 14 + 1\n sleep(0.5)",
"def default_channel(self) -> int:\r\n ...",
"def tie(self, output):\n self._vdma.readchannel.tie(output._vdma.writechannel)",
"def configure(self):\n\t\tself.outChannel = CAClient(self.pvstring + \".AOUT\")\n\t\tself.outChannel.configure()\n\t\tself.inChannel = CAClient(self.pvstring + \".TINP\")\n\t\tself.inChannel.configure()",
"def set_output(self, on=False):\r\n if on: self.write(\"OUTP ON\")\r\n else: self.write(\"OUTP OFF\")",
"def _channelList_changed(self):\n self.oscilloscope.visibleChannels = self.channelList",
"def onSetRelayOutput(self, event):",
"def set_output(self, iclass):\n self._custom_setter('output', iclass)\n self._custom_setter('presenting', True)",
"def reset(self, ch: int) -> None:\n\n GPIO.output(self.__channels[ch - 1], GPIO.HIGH)"
] | [
"0.72299033",
"0.64462674",
"0.64258164",
"0.63945687",
"0.6370218",
"0.63673383",
"0.62871987",
"0.62329954",
"0.62027675",
"0.6178937",
"0.617732",
"0.61016273",
"0.6085347",
"0.5988352",
"0.59880155",
"0.5981772",
"0.5967146",
"0.59572315",
"0.59504193",
"0.59496784",
"0.5947623",
"0.59274966",
"0.58841556",
"0.58411914",
"0.58359677",
"0.58245945",
"0.5819149",
"0.58190304",
"0.58003396",
"0.5783245"
] | 0.75566965 | 0 |
Toggles whether the suggestion box is enabled or not | async def suggest_toggle(self, ctx):
server = ctx.message.server
if server.id not in self.settings:
self.initial_config(server.id)
self.settings[server.id]['inactive'] = \
not self.settings[server.id]['inactive']
self.save_json()
if self.settings[server.id]['inactive']:
await self.bot.say("Suggestions disabled.")
else:
await self.bot.say("Suggestions enabled.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_searchEdit_textChanged(self, txt):\n self.searchButton.setEnabled(bool(txt))",
"def on_regex_search_toggle(self, event):\r\n\r\n if self.m_regex_search_checkbox.GetValue():\r\n update_autocomplete(self.m_searchfor_textbox, \"regex_search\")\r\n else:\r\n update_autocomplete(self.m_searchfor_textbox, \"literal_search\")\r\n event.Skip()",
"def __enableSpellingActions(self):\n from QScintilla.SpellChecker import SpellChecker\n spellingAvailable = SpellChecker.isAvailable()\n \n self.spellCheckAct.setEnabled(\n len(self.editors) != 0 and spellingAvailable)\n self.autoSpellCheckAct.setEnabled(spellingAvailable)",
"def as_you_type_toggle(self, event: Event) -> None:\n if self.spell_as_you_type:\n self.spell_as_you_type = False\n if not self.wrap_as_you_type:\n g.unregisterHandler('bodykey2', self.as_you_type_onkey)\n g.es(\"Spell as you type disabled\")\n return\n self.spell_as_you_type = True\n if not self.wrap_as_you_type:\n g.registerHandler('bodykey2', self.as_you_type_onkey)\n g.es(\"Spell as you type enabled\")",
"def entryToggle(self):\n status = \"normal\" if self.optionVar.get() == 4 else \"disabled\"\n for i in range(3):\n self.entry[i].configure(state=status)",
"def setSuggestionStrategy(self, value):\n if value == SuggestionStrategy.OCR:\n self.setBooleanOption(8, True)\n elif value == SuggestionStrategy.TYPO:\n self.setBooleanOption(8, False)\n else:\n raise VoikkoException(\"Invalid suggestion strategy\")",
"def __setAutoSpellChecking(self):\n enabled = self.autoSpellCheckAct.isChecked()\n Preferences.setEditor(\"AutoSpellCheckingEnabled\", enabled)\n for editor in self.editors:\n editor.setAutoSpellChecking()",
"def __editAutoComplete(self):\n self.activeWindow().autoComplete()",
"def __enableSearchEdit(self):\n self.__searchEdit.setEnabled(True)\n self.__filterIndices(self.__searchEdit.text())",
"def toggle_codecompletion_enter(self, checked):\r\n self.shell.set_codecompletion_enter(checked)\r\n CONF.set(self.ID, 'autocompletion/enter-key', checked)",
"def toggleShowOnlySelection(self):\r\n\t\tself.showOnlySelection = not self.showOnlySelection",
"def _enable_entry(self):\n self.insert_entry.configure(state=tk.NORMAL)\n self.insert_button.configure(state=tk.NORMAL)",
"def onStartAssistModeToggled(self, checked):\r\n # productive\r\n profprint()\r\n if checked:\r\n self.fiducialObturatorButton.checked = 0\r\n self.fiducialButton.checked = 0\r\n self.fiducialButton.text = \"2. Start Giving Needle Tips [CTRL + ENTER]\"\r\n self.start(self.addCTLPoints)\r\n self.startAssistModeButton.text = \"Stop Assisted Manual Segmentation\"\r\n else:\r\n self.stop()\r\n self.startAssistModeButton.text = \"Start Assisted Manual Segmentation\"",
"def suggestion(self):\n raise NotImplementedError()",
"def on_findtextCombo_editTextChanged(self, text):\n self.__enableFindButton()",
"def __showEditSpellingMenu(self):\n proj = e5App().getObject(\"Project\")\n projetOpen = proj.isOpen()\n pwl = e5App().getObject(\"Project\").getProjectDictionaries()[0]\n self.__editProjectPwlAct.setEnabled(projetOpen and bool(pwl))\n pel = e5App().getObject(\"Project\").getProjectDictionaries()[1]\n self.__editProjectPelAct.setEnabled(projetOpen and bool(pel))\n \n from QScintilla.SpellChecker import SpellChecker\n pwl = SpellChecker.getUserDictionaryPath()\n self.__editUserPwlAct.setEnabled(bool(pwl))\n pel = SpellChecker.getUserDictionaryPath(True)\n self.__editUserPelAct.setEnabled(bool(pel))",
"def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)",
"def updateAuto(self):\r\n if self.varAutoParse.get():\r\n self.optionProfile.config(state=tk.NORMAL)\r\n else:\r\n self.optionProfile.config(state=tk.DISABLED)",
"def toggle(self) -> None:\n ...",
"def toggle(self) -> None:",
"def toggle(self) -> None:",
"def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()",
"def comb_box_click(self):\n\n if self.checkBox.isChecked():\n self.completer.setFilterMode(Qt.MatchStartsWith)\n else:\n self.completer.setFilterMode(Qt.MatchContains)",
"def toggle(self, *_):\r\n \r\n global ac\r\n if self.author_f_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_m_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_l_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n else:\r\n self.add_a['state'] = 'disabled'",
"def __disableSearchEdit(self):\n self.__searchEdit.setEnabled(False)",
"def _ci_toggle(self, value):\n self.ui.comboBox_ci.setEnabled(value)",
"def handle_suggest():\n return 0",
"def text_to_find_changed(self, _):\n self.ui.findButton.setEnabled(self.ui.textToFind.size() > 0)",
"def toggle(self):",
"def on_replacetextCombo_editTextChanged(self, text):\n self.__enableFindButton()"
] | [
"0.66412807",
"0.63028073",
"0.6129605",
"0.6072069",
"0.6049684",
"0.6030341",
"0.59515285",
"0.59128994",
"0.58037853",
"0.576306",
"0.5755912",
"0.5748368",
"0.5699508",
"0.56507903",
"0.5613166",
"0.56094223",
"0.5581917",
"0.5581336",
"0.55632037",
"0.554556",
"0.554556",
"0.5538189",
"0.5514702",
"0.5501382",
"0.54945135",
"0.54848474",
"0.5477346",
"0.54767936",
"0.54746157",
"0.54664963"
] | 0.7504121 | 0 |
Get the request and client address from the socket, and wrap the connection in an SSL stream. | def get_request(
self,
) -> typing.Tuple[ssl.SSLSocket, typing.Tuple[str, int]]:
socket, addr = self.socket.accept()
stream = ssl.wrap_socket(
socket,
server_side=True,
keyfile=self.keyfile,
certfile=self.certfile,
ssl_version=self.ssl_version,
)
return stream, addr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ssl_wrap_socket(socket, ssl_options, server_hostname=..., **kwargs):\n ...",
"def get_request(self):\n client_socket, address = self.socket.accept()\n if self.ssl_context:\n client_socket = self.ssl_context.wrap_socket(client_socket,\n server_side=True)\n\n return client_socket, address",
"def SpoofSSL(self, request, connection):\n self.log.debug('Entering SpoofSSL')\n target_host = request.GetTargetHost()\n\n self.log.debug('target_host: %s:%s' % target_host)\n\n context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)\n\n if not self.cert:\n raise ValueError, 'self.cert not defined: Can not spoof SSL without cert'\n\n context.use_privatekey_file(self.cert)\n context.use_certificate_file(self.cert)\n\n self.log.debug('SSL context built')\n self.log.debug('Sending HTTP 200 OK to client')\n\n connection.sendall('HTTP/1.0 200 OK Connected\\r\\n\\r\\n')\n\n ssl_connection = OpenSSL.SSL.Connection(context, connection)\n ssl_connection.set_accept_state()\n self.log.debug('Select(ing) on connection socket')\n select.select([connection], [], [])\n self.log.debug('SSL calling do_handshake()')\n ssl_connection.do_handshake()\n self.log.debug('SSL do_handshake() completed')\n\n ssl_connection.state_string()\n\n self.log.debug('Building SSL fileobjects')\n new_connection_write = socket._fileobject(ssl_connection, 'w')\n new_connection_read = socket._fileobject(ssl_connection, 'r')\n new_connection = socket._fileobject(ssl_connection)\n self.log.debug('Done building SSL fileobjects')\n\n self.connection = ssl_connection\n self.wfile = new_connection_write\n self.rfile = new_connection_read\n\n return True",
"def ussl.wrap_socket(sock, server_side=False, keyfile=None, certfile=None, cert_reqs=CERT_NONE, ca_certs=None):\n pass",
"def wrap_socket(sock, server_side=False, keyfile=None, certfile=None, cert_reqs=CERT_NONE, ca_certs=None):\n ...",
"def optionally_wrap_socket(\n self, conn: socket.socket) -> Union[ssl.SSLSocket, socket.socket]:\n if self.config.certfile and self.config.keyfile:\n ctx = ssl.create_default_context(\n ssl.Purpose.CLIENT_AUTH)\n ctx.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n ctx.verify_mode = ssl.CERT_NONE\n ctx.load_cert_chain(\n certfile=self.config.certfile,\n keyfile=self.config.keyfile)\n conn = ctx.wrap_socket(conn, server_side=True)\n return conn",
"def wrap_socket(self, sock):\n return self.context.wrap_socket(sock, server_side=True)",
"def handle_connect(self):\n if self.use_ssl:\n self.ssl = ssl.wrap_socket(self.socket)\n self.set_socket(self.ssl)",
"async def _open_connection_https(self, location):\n sock = await connect_tcp(\n location[0],\n location[1],\n ssl_context=self.ssl_context,\n local_host=self.source_address,\n tls=True,\n tls_standard_compatible=False,\n )\n sock._active = True\n return sock",
"def _client(self, sock):\n # Now create the client side Connection. Similar boilerplate to the\n # above.\n client_ctx = Context(SSLv23_METHOD)\n client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)\n client_ctx.set_verify(\n VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,\n verify_cb,\n )\n client_store = client_ctx.get_cert_store()\n client_ctx.use_privatekey(\n load_privatekey(FILETYPE_PEM, client_key_pem)\n )\n client_ctx.use_certificate(\n load_certificate(FILETYPE_PEM, client_cert_pem)\n )\n client_ctx.check_privatekey()\n client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))\n client_conn = Connection(client_ctx, sock)\n client_conn.set_connect_state()\n return client_conn",
"def test_wrap_existing_socket(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n assert ssock.version() is not None\n ssock.send(sample_request())\n response = consume_socket(ssock)\n validate_response(response)",
"def _get_socket(self, secure=False, timeout=None):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(timeout or self.timeout)\n\n if secure:\n # Setting Purpose to CLIENT_AUTH might seem a bit backwards. But\n # SOS Access v4 is using SSL/TLS for encryption not authentications\n # and verification. There is no cert and no hostname to check so\n # setting the purpose to Client Auth diables that in a nifty way.\n self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n return self.context.wrap_socket(sock)\n\n else:\n return sock",
"def wrap_socket(self, sock):\n return sock",
"def _client_session(self, data):\n self._check_ca_certificate()\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((\"127.0.0.1\", self._app_port))\n secure_socket = ssl.wrap_socket(\n client_socket,\n cert_reqs=ssl.CERT_REQUIRED,\n ssl_version=ssl.PROTOCOL_TLSv1_2,\n ca_certs=self._ca_certificate_path,\n ciphers=self._ciphers)\n\n secure_socket.write(str.encode(data))\n secure_socket.close()\n client_socket.close()",
"def _socket_connect(endpoint: urllib.parse.ParseResult) -> typing.Union[ssl.SSLSocket, socket.socket]:\n address = endpoint.netloc.split(':')\n if endpoint.scheme == 'https':\n if len(address) == 1:\n address.append(443)\n context = ssl.SSLContext(ssl.PROTOCOL_TLS)\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n sock = socket.socket()\n connection = context.wrap_socket(sock, server_hostname=address[0])\n else:\n if len(address) == 1:\n address.append(80)\n connection = socket.socket()\n if isinstance(address[1], str):\n address[1] = int(address[1])\n connection.connect((address[0], address[1]))\n return connection",
"def test_unwrap_existing_socket(self) -> None:\n\n def shutdown_handler(listener: socket.socket) -> None:\n sock = listener.accept()[0]\n ssl_sock = self.server_context.wrap_socket(sock, server_side=True)\n\n request = consume_socket(ssl_sock)\n validate_request(request)\n ssl_sock.sendall(sample_response())\n\n unwrapped_sock = ssl_sock.unwrap()\n\n request = consume_socket(unwrapped_sock)\n validate_request(request)\n unwrapped_sock.sendall(sample_response())\n\n self.start_dummy_server(shutdown_handler)\n sock = socket.create_connection((self.host, self.port))\n ssock = SSLTransport(sock, self.client_context, server_hostname=\"localhost\")\n\n # request/response over TLS.\n ssock.sendall(sample_request())\n response = consume_socket(ssock)\n validate_response(response)\n\n # request/response over plaintext after unwrap.\n ssock.unwrap()\n sock.sendall(sample_request())\n response = consume_socket(sock)\n validate_response(response)",
"def ClientApp(stream):\n\n def secured():\n print 'client secured!'\n write('QUIT')\n\n def read(line):\n line = line.strip()\n print 'server said: %r.' % line\n if line == \"YOU SAID: 'hello'\":\n write('STARTTLS')\n elif line == 'PROCEED':\n stream.starttls(secured)\n elif line == 'GOODBYE':\n stream.close()\n else:\n wait()\n\n def write(data):\n stream.write('%s\\n' % data)\n wait()\n\n def wait():\n stream.read_until('\\n', read)\n\n ## Begin\n print 'starting client: %r' % stream.socket.fileno()\n write('hello')",
"def wrap_socket(self, sock, server_side: bool = ..., do_handshake_on_connect: bool = ..., suppress_ragged_eofs: bool = ..., dummy: Optional[Any] = ...):\n ...",
"def __init__(\n self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True\n ):\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n\n self.sslobj = ssl_context.wrap_bio(\n self.incoming, self.outgoing, server_hostname=server_hostname\n )\n\n # Perform initial handshake.\n self._ssl_io_loop(self.sslobj.do_handshake)",
"def BypassSSL(self, request, connection, timeout=10):\n RSIZE = 8192\n request_log = ssl_proxy_log.Log()\n request_log.SetValue('request', request)\n request_log.SetValue('request_time', decimal.Decimal(repr(time.time())))\n request_log.SetValue('client', self.client_address)\n\n fetch_logging = logging.getLogger('BypassSSL')\n\n fetch_logging.debug('Entering BypassSSL')\n target_host = request.GetTargetHost()\n fetch_logging.debug('target_host: %s:%s' % target_host)\n\n request_log.TimeStart('total')\n\n fetch_logging.debug('Starting DNS resolution')\n request_log.TimeStart('dns')\n try:\n target_ip = socket.gethostbyname(target_host[0])\n except socket.gaierror, e:\n request_log.TimeStop('dns')\n self.SetErrorState(request_log, 503, 'DNS error: %s' % e)\n return request_log\n\n request_log.TimeStop('dns')\n request_log.SetValue('target_ip', target_ip)\n fetch_logging.debug('Completed DNS resolution')\n\n outbound_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n outbound_connection.settimeout(timeout)\n\n client_id = connection.getpeername()\n\n fetch_logging.debug('Starting connect')\n try:\n request_log.SetValue('connect_begin', time.time())\n request_log.TimeStart('connect')\n outbound_connection.connect((target_ip, target_host[1]))\n request_log.TimeStop('connect')\n request_log.SetValue('connect_end', time.time())\n except socket.error, e:\n fetch_logging.debug('Connection to %s:%s failed: %s' % (target_host[0],\n target_host[1], e))\n request_log.TimeStop('connect')\n self.SetErrorState(request_log, 500, 'Connection Failed: %s' % e)\n return request_log\n\n # Once we've managed to open a connection to the target endpoint, tell\n # the client we're ready for more data.\n fetch_logging.debug('Sending HTTP 200 OK to client')\n connection.sendall('HTTP/1.0 200 OK Connected\\r\\n\\r\\n')\n\n request_log.SetValue('client_content-length', 0)\n request_log.SetValue('server_content-length', 0)\n\n\n client_data_len = 0\n server_data_len = 0\n\n request_log.TimeStart('body')\n while True:\n ready = select.select([outbound_connection, connection], [],\n [outbound_connection, connection])\n\n if ready[2]:\n fetch_logging.debug('Exception reported from select on: %s' %\n ready[2][0])\n request_log.TimeStop('body')\n request_log.TimeStop('total')\n request_log.SetValue('response_code', None)\n request_log.SetValue('response_disposition',\n 'exceptional condition from select')\n request_log.WriteLog(self.log_file)\n\n outbound_connection.close()\n return request_log\n\n for i in ready[0]:\n # For every socket that has data waiting, read data into buffer, and\n # dump on other socket.\n data = i.recv(RSIZE)\n if i == outbound_connection:\n target = connection\n host = target_host\n server_data_len += len(data)\n else:\n target = outbound_connection\n host = client_id\n client_data_len += len(data)\n\n if not data:\n # This is in the event of a zero byte read (ie connection closed)\n fetch_logging.debug('End of data on: %s:%s' % host)\n outbound_connection.close()\n request_log.TimeStop('body')\n request_log.TimeStop('total')\n request_log.SetValue('client_content-length', client_data_len)\n request_log.SetValue('server_content-length', server_data_len)\n request_log.SetValue('response_disposition', 'Success')\n return request_log\n\n # Send data from source -> target after it is ready to recv\n select.select([], [target], [])\n target.sendall(data)",
"def _ssl_client_handshake(self, connection):\r\n\r\n try:\r\n # unsets the handshake flag associated with the SSL, meaning\r\n # that the connection is considered to be currently under the\r\n # handshaking process (may succeed in the current tick)\r\n connection.ssl_handshake = False\r\n connection.ssl_connecting = True\r\n\r\n # tries to runs the handshake process, this represents\r\n # a series of small operations both of writing and reading\r\n # that a required to establish and guarantee a secure\r\n # connection from this moment on, note that this operation\r\n # may fail (non blocking issues) and further retries must\r\n # be attempted to finish establishing the connection\r\n _socket = connection.socket\r\n _socket.do_handshake()\r\n\r\n # sets the SSL handshake flag in the connection, effectively\r\n # indicating that the SSL handshake process has finished, note\r\n # that the connecting flag is also unset (SSL connect finished)\r\n connection.ssl_handshake = True\r\n connection.ssl_connecting = False\r\n\r\n # calls the end starter method in the connection so that the\r\n # connection gets notified that the current starter in process\r\n # has finished and that the next one should be called as\r\n # soon as possible to go further in the connection initialization\r\n connection.end_starter()\r\n\r\n # prints a debug information notifying the developer about\r\n # the finishing of the handshaking process for the connection\r\n self.debug(\"SSL Handshaking completed for connection\")\r\n\r\n # calls the proper callback on the connection meaning\r\n # that SSL is now enabled for that socket/connection and so\r\n # the communication between peers is now secured\r\n self.on_client_ssl(connection)\r\n except ssl.SSLError as error:\r\n # tries to retrieve the error code from the argument information\r\n # in the error, in case the error is defined in the list of\r\n # valid errors, the handshake is delayed until either a write\r\n # or read operation is available (retry process)\r\n error_v = error.args[0] if error.args else None\r\n if error_v in SSL_VALID_ERRORS:\r\n if error_v == ssl.SSL_ERROR_WANT_WRITE and\\\r\n not self.is_sub_write(_socket):\r\n self.sub_write(_socket)\r\n elif self.is_sub_write(_socket):\r\n self.unsub_write(_socket)\r\n else: raise",
"def connect(self):\n # Standard implementation from HTTPSConnection, which is not\n # designed for extension, unfortunately\n sock = socket.create_connection((self.host, self.port),\n self.timeout, self.source_address)\n if getattr(self, '_tunnel_host', None):\n self.sock = sock\n self._tunnel()\n\n # This is the only difference; default wrap_socket uses SSLv23\n self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1_2)",
"def ssl_connect(host, port = DEFAULT_SERVER_SSL_PORT, keyfile = None, \n certfile = None, ca_certs = None, ssl_version = None):\n return factory.ssl_connect(host, port, keyfile = keyfile, certfile = certfile,\n ssl_version = ssl_version, ca_certs = ca_certs, service = SlaveService)",
"def get_ssl_socket(self):\n _print(f\"THREADING IDENT: {threading.get_ident()}\")\n return Server.t_ssl_sockets.get(threading.get_ident())",
"def _client_connection(self, callback, data, request_ocsp=True):\n ctx = Context(SSLv23_METHOD)\n ctx.set_ocsp_client_callback(callback, data)\n client = Connection(ctx)\n\n if request_ocsp:\n client.request_ocsp()\n\n client.set_connect_state()\n return client",
"def ssl_connect(host, port, keyfile=None, certfile=None, ca_certs=None,\n cert_reqs=None, ssl_version=None, ciphers=None,\n service=VoidService, config={}, ipv6=False, keepalive=False, verify_mode=None):\n ssl_kwargs = {\"server_side\": False}\n if keyfile is not None:\n ssl_kwargs[\"keyfile\"] = keyfile\n if certfile is not None:\n ssl_kwargs[\"certfile\"] = certfile\n if verify_mode is not None:\n ssl_kwargs[\"cert_reqs\"] = verify_mode\n else:\n ssl_kwargs[\"cert_reqs\"] = ssl.CERT_NONE\n if ca_certs is not None:\n ssl_kwargs[\"ca_certs\"] = ca_certs\n ssl_kwargs[\"cert_reqs\"] = ssl.CERT_REQUIRED\n if cert_reqs is not None:\n ssl_kwargs[\"cert_reqs\"] = cert_reqs\n elif cert_reqs != ssl.CERT_NONE:\n ssl_kwargs[\"check_hostname\"] = False\n if ssl_version is not None:\n ssl_kwargs[\"ssl_version\"] = ssl_version\n if ciphers is not None:\n ssl_kwargs[\"ciphers\"] = ciphers\n s = SocketStream.ssl_connect(host, port, ssl_kwargs, ipv6=ipv6, keepalive=keepalive)\n return connect_stream(s, service, config)",
"def test_ssl_object_attributes(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n cipher = ssock.cipher()\n assert type(cipher) == tuple\n\n # No chosen protocol through ALPN or NPN.\n assert ssock.selected_alpn_protocol() is None\n assert ssock.selected_npn_protocol() is None\n\n shared_ciphers = ssock.shared_ciphers()\n # SSLContext.shared_ciphers() changed behavior completely in a patch version.\n # See: https://github.com/python/cpython/issues/96931\n assert shared_ciphers is None or (\n type(shared_ciphers) is list and len(shared_ciphers) > 0\n )\n\n assert ssock.compression() is None\n\n validate_peercert(ssock)\n\n ssock.send(sample_request())\n response = consume_socket(ssock)\n validate_response(response)",
"def handshake(self):\n\n request_line = _build_method_line(self._options.get('resource'))\n self._logger.debug('Client\\'s opening handshake Request-Line: %r',\n request_line)\n self._socket.sendall(request_line)\n\n fields = []\n fields.append(_format_host_header(\n self._options.get('server_host'),\n int(self._options.get('server_port')),\n self._options.get('use_tls')=='True'))\n fields.append(_UPGRADE_HEADER)\n fields.append(_CONNECTION_HEADER)\n if self._options.get('origin') is not None:\n if self._options.get('protocol_version') == _PROTOCOL_VERSION_HYBI08:\n fields.append(_origin_header(\n common.SEC_WEBSOCKET_ORIGIN_HEADER,\n self._options.get('origin')))\n else:\n fields.append(_origin_header(common.ORIGIN_HEADER,\n self._options.get('origin')))\n\n original_key = os.urandom(16)\n self._key = base64.b64encode(original_key)\n self._logger.debug(\n '%s: %r (%s)',\n common.SEC_WEBSOCKET_KEY_HEADER,\n self._key,\n util.hexify(original_key))\n fields.append(\n '%s: %s\\r\\n' % (common.SEC_WEBSOCKET_KEY_HEADER, self._key))\n\n if int(self._options.get('version_header')) > 0:\n fields.append('%s: %d\\r\\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,\n int(self._options.get('version_header'))))\n else:\n fields.append('%s: %d\\r\\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,\n common.VERSION_HYBI_LATEST))\n\n extensions_to_request = []\n\n if self._options.get('deflate_stream') == 'True':\n extensions_to_request.append(\n common.ExtensionParameter(\n common.DEFLATE_STREAM_EXTENSION))\n\n if self._options.get('deflate_frame') == 'True':\n extensions_to_request.append(\n common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION))\n\n if len(extensions_to_request) != 0:\n fields.append(\n '%s: %s\\r\\n' %\n (common.SEC_WEBSOCKET_EXTENSIONS_HEADER,\n common.format_extensions(extensions_to_request)))\n\n for field in fields:\n self._socket.sendall(field)\n\n self._socket.sendall('\\r\\n')\n\n self._logger.debug('Sent client\\'s opening handshake headers: %r',\n fields)\n self._logger.debug('Start reading Status-Line')\n\n status_line = ''\n while True:\n ch = _receive_bytes(self._socket, 1)\n status_line += ch\n if ch == '\\n':\n break\n\n m = re.match('HTTP/\\\\d+\\.\\\\d+ (\\\\d\\\\d\\\\d) .*\\r\\n', status_line)\n if m is None:\n raise ClientHandshakeError(\n 'Wrong status line format: %r' % status_line)\n status_code = m.group(1)\n if status_code != '101':\n self._logger.debug('Unexpected status code %s with following '\n 'headers: %r', status_code, self._read_fields())\n raise ClientHandshakeError(\n 'Expected HTTP status code 101 but found %r' % status_code)\n\n self._logger.debug('Received valid Status-Line')\n self._logger.debug('Start reading headers until we see an empty line')\n\n fields = self._read_fields()\n\n ch = _receive_bytes(self._socket, 1)\n if ch != '\\n': # 0x0A\n raise ClientHandshakeError(\n 'Expected LF but found %r while reading value %r for header '\n 'name %r' % (ch, \"test\", \"test\"))\n\n self._logger.debug('Received an empty line')\n self._logger.debug('Server\\'s opening handshake headers: %r', fields)\n\n _validate_mandatory_header(\n fields,\n common.UPGRADE_HEADER,\n common.WEBSOCKET_UPGRADE_TYPE,\n False)\n\n _validate_mandatory_header(\n fields,\n common.CONNECTION_HEADER,\n common.UPGRADE_CONNECTION_TYPE,\n False)\n\n accept = _get_mandatory_header(\n fields, common.SEC_WEBSOCKET_ACCEPT_HEADER)\n\n # Validate\n try:\n binary_accept = base64.b64decode(accept)\n except TypeError, e:\n raise HandshakeError(\n 'Illegal value for header %s: %r' %\n 
(common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))\n\n if len(binary_accept) != 20:\n raise ClientHandshakeError(\n 'Decoded value of %s is not 20-byte long' %\n common.SEC_WEBSOCKET_ACCEPT_HEADER)\n\n self._logger.debug(\n 'Response for challenge : %r (%s)',\n accept, util.hexify(binary_accept))\n\n binary_expected_accept = util.sha1_hash(\n self._key + common.WEBSOCKET_ACCEPT_UUID).digest()\n expected_accept = base64.b64encode(binary_expected_accept)\n\n self._logger.debug(\n 'Expected response for challenge: %r (%s)',\n expected_accept, util.hexify(binary_expected_accept))\n\n if accept != expected_accept:\n raise ClientHandshakeError(\n 'Invalid %s header: %r (expected: %s)' %\n (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept, expected_accept))\n\n deflate_stream_accepted = False\n deflate_frame_accepted = False\n\n extensions_header = fields.get(\n common.SEC_WEBSOCKET_EXTENSIONS_HEADER.lower())\n accepted_extensions = []\n if extensions_header is not None and len(extensions_header) != 0:\n accepted_extensions = common.parse_extensions(extensions_header[0])\n # TODO(bashi): Support the new style perframe compression extension.\n for extension in accepted_extensions:\n extension_name = extension.name()\n if (extension_name == common.DEFLATE_STREAM_EXTENSION and\n len(extension.get_parameter_names()) == 0 and\n self._options.get('deflate_stream')=='True'):\n deflate_stream_accepted = True\n continue\n\n if (extension_name == common.DEFLATE_FRAME_EXTENSION and\n self._options.get('deflate_frame')=='True'):\n deflate_frame_accepted = True\n processor = DeflateFrameExtensionProcessor(extension)\n unused_extension_response = processor.get_extension_response()\n self._options['deflate_frame'] = processor\n continue\n\n raise ClientHandshakeError(\n 'Unexpected extension %r' % extension_name)\n\n if (self._options.get('deflate_stream') == 'True' and not deflate_stream_accepted):\n raise ClientHandshakeError(\n 'Requested %s, but the server rejected it' %\n common.DEFLATE_STREAM_EXTENSION)\n\n if (self._options.get('deflate_frame') == 'True' and not deflate_frame_accepted):\n raise ClientHandshakeError(\n 'Requested %s, but the server rejected it' %\n common.DEFLATE_FRAME_EXTENSION)",
"def __call__(self):\n ssl_mode, external_ca = ssl_utils.get_ssl_mode()\n\n ctxt = {\n 'ssl_mode': ssl_mode,\n }\n\n if ssl_mode == 'off':\n close_port(config('ssl_port'))\n ssl_utils.reconfigure_client_ssl()\n return ctxt\n\n ssl_key = convert_from_base64(config('ssl_key'))\n ssl_cert = convert_from_base64(config('ssl_cert'))\n ssl_ca = convert_from_base64(config('ssl_ca'))\n ssl_port = config('ssl_port')\n\n # If external managed certs then we need all the fields.\n if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and\n not all((ssl_key, ssl_cert))):\n log('If ssl_key or ssl_cert are specified both are required.',\n level=ERROR)\n sys.exit(1)\n\n if not external_ca:\n ssl_cert, ssl_key, ssl_ca = ServiceCA.get_service_cert()\n\n ctxt.update(self.enable_ssl(\n ssl_key, ssl_cert, ssl_port, ssl_ca,\n ssl_only=(ssl_mode == \"only\"), ssl_client=False\n ))\n\n ssl_utils.reconfigure_client_ssl(True)\n open_port(ssl_port)\n\n return ctxt",
"def get_ssl_certificate():"
] | [
"0.71356076",
"0.68475354",
"0.6733499",
"0.6656914",
"0.6618129",
"0.62812626",
"0.6249632",
"0.61929065",
"0.6149828",
"0.6121964",
"0.6121661",
"0.6115878",
"0.6056641",
"0.5964749",
"0.5959794",
"0.5909478",
"0.5882002",
"0.5877713",
"0.5869282",
"0.5824944",
"0.58152556",
"0.58095294",
"0.57660854",
"0.57337636",
"0.57318664",
"0.5706049",
"0.57051593",
"0.5689323",
"0.5683694",
"0.56052315"
] | 0.7671109 | 0 |
Combine script files based on the path of the request. For a request for ``/gist.github.com.js``, tries to load ``gist.github.com.js`` as well as ``github.com.js`` and ``com.js``, in addition to the global ``default.js``. Returns the combined contents of the scripts found. | def build_body(self) -> str:
# Always include default.js
files = [os.path.join(self.directory, "default.js")]
# Find increasingly less specific files based on the request path.
paths = self.path.replace("/", "").split(".")
while paths:
files.append(os.path.join(self.directory, ".".join(paths)))
paths = paths[1:]
# Combine the files found, if they exist.
body = "// dotjs is working! //\n"
for filename in files:
if os.path.exists(filename):
with open(filename) as fp:
body += fp.read() + "\n"
return body | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_script(self,\n script_path,\n domain=\"raw.githubusercontent.com\",\n urlpath=_GITHUB_URLPATH):\n info = self.script_path(script_path)\n _fetch_script(info, script_path, domain, urlpath)\n return info",
"def get_source_files(self):\n return zip(*self.distribution.scripts)[0]",
"def get_default_scripts ( self ):\n return roverlay.util.dictwalk.dictmerge (\n self.iter_default_scripts ( unpack=True ),\n get_value=lambda kv:kv[1]\n )",
"def get_imports() -> str:\n extension = \"\"\n for js_ in JS_FILES.values():\n extension += f'<script src=\"{js_}\"></script>'\n for css in CSS_FILES.values():\n extension += f'<link rel=\"stylesheet\" href=\"{css}\" is=\"custom-style\">'\n\n return extension",
"def js_merge(self):\n if self.merge:\n js = \"\"\n for file_name in self.file_names:\n try:\n js += jsmin(open(file_name, newline=\"\\n\").read())\n except FileNotFoundError:\n print(f\"The file {file_name} could not be found\")\n self.js = jsmin(js)\n\n else:\n for file_name in self.file_names:\n js = jsmin(open(file_name, newline=\"\\n\").read())\n open(file_name, 'w', newline=\"\\n\").write(js)",
"def copy_scripts (self):\n self.mkpath(self.build_dir)\n outfiles = []\n for source, scriptname in self.scripts:\n script = util.convert_path(source)\n # skip empty files\n if not os.path.getsize(script):\n self.warn(\"%s is an empty file (skipping)\" % script)\n continue\n\n if os.name != 'posix' and not scriptname.endswith('.py'):\n # add py extensions on systems, which don't understand\n # shebangs\n scriptname += '.py'\n outfile = os.path.join(self.build_dir, scriptname)\n outfiles.append(outfile)\n\n if not self.force and not dep_util.newer(script, outfile):\n log.debug(\"not copying %s (up-to-date)\", script)\n continue\n\n if not self._adjust_shebang(script, outfile):\n # just copy script, if there was no sheband to adjust\n self.copy_file(script, outfile)",
"def get_all_js_files(self, root):\n res = []\n\n for fname in os.listdir(root):\n mo = re.match(r'(\\w+)\\.js$', fname)\n if mo:\n res.append({\n 'name': mo.group(1),\n 'src': file_contents(os.path.join(root, mo.group()))\n })\n\n return res",
"def getBaseURL():\n return getQualifiedURL(getScriptname())",
"def get_js(directory):\n\n composed = fu.lcompose([\n partial(get_path_files_with_ext, '.js'),\n fu.fmap(fu.file_to_str),\n '\\n'.join,\n ])\n return composed(directory)",
"def get_scripts():\n scripts = []\n if os.path.isdir('bin'):\n scripts = [ fname for fname in glob.glob(os.path.join('bin', '*'))\n if not os.path.basename(fname).endswith('.rst') ]\n return scripts",
"def get_scripts():\n scripts = []\n if os.path.isdir('bin'):\n scripts = [fname for fname in glob.glob(os.path.join('bin', '*'))\n if not os.path.basename(fname).endswith('.rst')]\n return scripts",
"def LoadRenderScripts(scriptRoot):\n # user supplied rendering scripts\n # script names (keys) are used by the Render funciton\n # to select the desired script\n renderingScripts = {\n 'max(v(x,y))' : 'render-max-v.py',\n 'binning bv' : 'render-binning-bv.py',\n 'scatter bv' : 'render-scatter-bv.py',\n 'volume phi' : 'render-volume-phi.py',\n 'particle v' : 'render-particle-v.py',\n }\n\n for key,fileName in renderingScripts.iteritems():\n f = open(os.path.join(scriptRoot,fileName))\n code = f.read()\n f.close()\n renderingScripts[key] = code\n\n return renderingScripts",
"def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts",
"def get_source_paths():\r\n script_paths = set()\r\n try:\r\n script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep)))\r\n script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep)))\r\n except AttributeError:\r\n logger.debug('No custom environ variables set.')\r\n\r\n cwd = os.path.dirname(os.path.abspath(__file__))\r\n for each in os.listdir(cwd):\r\n path = os.path.join(cwd, each)\r\n if not os.path.isdir(path) or each.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n script_paths.add(path)\r\n\r\n return script_paths",
"def fetch_and_import(self,\n script_path,\n domain=\"raw.githubusercontent.com\",\n urlpath=_GITHUB_URLPATH):\n def import_file_directly(path):\n \"\"\"Import a file at :path: directly, bypassing __import__.\"\"\"\n name = \"local_module_\" + re.sub(r\"[\\./]\", \"_\", path)\n return imp.load_source(name, path)\n\n # First try to find the script locally in the\n # current working directory.\n info = self.fetch_script(script_path, domain, urlpath)\n key = \"{0}/{1}/{2}\".format(domain, urlpath, script_path)\n\n try:\n return self._module_cache[key]\n except KeyError:\n # We try to import the file normally first - this is useful\n # for tests where we want to be able to get coverage on those\n # files. If we can't import directly, then we need to\n # fall back to importing the file.\n fs_path = info.fs_path\n if info.in_scripts_dir:\n try:\n name = os.path.relpath(os.path.splitext(fs_path)[0],\n start=self._scripts_dir)\n name = name.replace(os.path.sep, \".\")\n self._module_cache[key] = importlib.import_module(name)\n except ImportError:\n self._module_cache[key] = import_file_directly(fs_path)\n else:\n self._module_cache[key] = import_file_directly(fs_path)\n\n return self._module_cache[key]",
"def third_party_scripts(request):\n return {\n 'ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE':\n settings.ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE\n }",
"def _fetch_script(info,\n script_path,\n domain=\"raw.githubusercontent.com\",\n urlpath=_GITHUB_URLPATH):\n if not os.path.exists(info.fs_path):\n with open_and_force_mkdir(info.fs_path, \"w\") as scr:\n remote = \"%s/%s/%s\" % (domain, urlpath, script_path)\n retrycount = 100\n while retrycount != 0:\n try:\n contents = urlopen(\"http://{0}\".format(remote)).read()\n scr.write(contents.decode())\n scr.truncate()\n retrycount = 0\n except URLError:\n retrycount -= 1",
"def scripts_in_dir(path):\n introwords = [' ', '\\n', '#', 'import', 'from']\n\n listing = sorted(os.listdir(path))\n scripts = []\n for name in listing:\n ###################################################\n # RETRIEVE SCRIPT NAME #\n ###################################################\n longname = path + '/' + name\n if len(name) < 3:\n continue\n if name[0] == '.':\n continue\n if name[:2] == '__':\n continue\n if name[-3:] != '.py':\n continue\n if not os.path.isfile(longname):\n continue\n\n ###################################################\n # RETRIEVE DOCSTRING HEADER #\n ###################################################\n f = open(longname, 'r')\n lines = f.readlines()\n f.close()\n\n header = ''\n for line in lines:\n # skip intro lines\n skip = False\n for word in introwords:\n L = len(word)\n if line[:L] == word:\n skip = True\n break\n if skip:\n continue\n\n # check whether line is start of a docstring\n if line[:3] not in ['\"\"\"', \"'''\"]:\n break\n\n # retrieve header of this string\n header = line[3:]\n\n # delete trailing closing quotes\n if header[-3:] in ['\"\"\"', \"'''\"]:\n header = header[:-3]\n\n scripts.append([name, header])\n\n return scripts",
"def get_vendor_bundle_path() -> str:\n vendor_bundle_directory = os.path.join(os.path.dirname(__file__), \"dist\", \"js\")\n file_list_with_full_path = []\n for f in os.listdir(vendor_bundle_directory):\n file_path = os.path.join(vendor_bundle_directory, f)\n if os.path.isfile(file_path):\n if os.path.splitext(file_path)[-1].endswith(\"js\"):\n if os.path.splitext(f)[0].startswith(\"chunk-vendors\"):\n file_list_with_full_path.append(os.path.abspath(file_path))\n return file_list_with_full_path[0]",
"def get_default_javascript():\n return [\"_static/require.js\"]",
"def get_js_files(directories):\n # Initialize key variables\n result = []\n # iterate through files in directories\n for d in directories:\n for root, _, files in os.walk(d, topdown=False):\n for name in files:\n # append files with .js extension\n if name.endswith('.js'):\n result.append(os.path.join(root, name))\n\n return result",
"def _rollup_static_files(self, instructions, extension, minifier=None,\n wrap_source=None):\n fix_css_urls = True if 'css' in extension else False\n is_lessjs = self._instructions_have_lessjs(instructions)\n\n def nop_minifier(arg):\n \"\"\"\n A minifier that does nothing, but is callable\n \"\"\"\n return arg\n\n # Figure out a name\n files = [i['static'] for i in instructions]\n\n basename = '%s.%s' % (self._make_filename(files), extension,)\n filename = os.path.join(self.cache_root, basename)\n location = urljoin(self.cache_url, basename)\n retval = {'location': location}\n\n if is_lessjs:\n retval['process'] = 'lessjs'\n\n if not files:\n return None\n\n lastmod = max([self._get_media_stat(i).st_mtime for i in files])\n\n if os.path.isfile(filename) and \\\n filename in self.__rollup_last_modifieds and \\\n self.__rollup_last_modifieds[filename] == lastmod:\n # Nothing has changed since we last saw this instruction set\n return retval\n self.__rollup_last_modifieds[filename] = lastmod\n\n if not wrap_source:\n wrap_source = ('', '',)\n\n if not minifier or is_lessjs:\n \"\"\"\n If minifier is not defined we use a no-operate version\n\n If lessjs is used, we can't alter the original file because it will\n throw the parser off. So we turn off the minification\n \"\"\"\n minifier = nop_minifier\n\n if not os.path.isfile(filename) or settings.DEBUG:\n f = open(filename, 'w')\n source = minifier(self._concat_files(instructions, fix_css_urls))\n f.write('%s\\n%s\\n%s' % (wrap_source[0], source, wrap_source[1],))\n f.close()\n\n return retval",
"def mergeScripts(\n netParams,\n tx,\n idx,\n pkScript,\n scriptClass,\n addresses,\n nRequired,\n sigScript,\n prevScript,\n):\n # TODO(oga) the scripthash and multisig paths here are overly\n # inefficient in that they will recompute already known data.\n # some internal refactoring could probably make this avoid needless\n # extra calculations.\n scriptVersion = 0\n if scriptClass == ScriptHashTy:\n # Nothing to merge if either the new or previous signature\n # scripts are empty or fail to parse.\n\n if (\n len(sigScript) == 0\n or checkScriptParses(scriptVersion, sigScript) is not None\n ):\n return prevScript\n if (\n not prevScript\n or len(prevScript) == 0\n or checkScriptParses(scriptVersion, prevScript) is not None\n ):\n return sigScript\n\n # Remove the last push in the script and then recurse.\n # this could be a lot less inefficient.\n #\n # Assume that final script is the correct one since it was just\n # made and it is a pay-to-script-hash.\n script = finalOpcodeData(scriptVersion, sigScript)\n\n # We already know this information somewhere up the stack,\n # therefore the error is ignored.\n scriptClass, addresses, nrequired = extractPkScriptAddrs(\n DefaultScriptVersion, script, netParams\n )\n\n # Merge\n mergedScript = mergeScripts(\n netParams,\n tx,\n idx,\n script,\n scriptClass,\n addresses,\n nrequired,\n sigScript,\n prevScript,\n )\n\n # Reappend the script and return the result.\n finalScript = ByteArray(b\"\", length=0)\n finalScript += mergedScript\n finalScript += addData(script)\n return finalScript\n elif scriptClass == MultiSigTy:\n return mergeMultiSig(\n tx, idx, addresses, nRequired, pkScript, sigScript, prevScript\n )\n else:\n # It doesn't actually make sense to merge anything other than multisig\n # and scripthash (because it could contain multisig). Everything else\n # has either zero signatures, can't be spent, or has a single signature\n # which is either present or not. The other two cases are handled\n # above. In the conflicting case here we just assume the longest is\n # correct (this matches behaviour of the reference implementation).\n if prevScript is None or len(sigScript) > len(prevScript):\n return sigScript\n return prevScript",
"def list_scripts(self):\n glob_exp = self.script_dir + \"/*.sh\"\n return [re.sub(r'.*/(.*)\\.sh', r'\\1', x) for x in glob(glob_exp)]",
"def get_xmodule_urls():\r\n if settings.DEBUG:\r\n paths = [path.replace(\".coffee\", \".js\") for path in\r\n settings.PIPELINE_JS['module-js']['source_filenames']]\r\n else:\r\n paths = [settings.PIPELINE_JS['module-js']['output_filename']]\r\n return [staticfiles_storage.url(path) for path in paths]",
"def get_static_regexps():\n handlers = modules_util.module_yaml('default')['handlers']\n retval = set()\n\n for handler in handlers:\n if handler.GetHandlerType() == 'static_dir':\n retval.add('^' + handler.url + '/')\n elif handler.GetHandlerType() == 'static_files':\n retval.add('^' + handler.url + '$')\n\n return sorted(retval)",
"def sources(self):\n res = set()\n for elem in chain(settings.PIPELINE_CSS.values(), settings.PIPELINE_JS.values()):\n # TODO: add support for glob\n res.update(elem.get('source_filenames', []))\n return tuple(res)",
"def _get_script_paths_from_scripts_node(self) -> typing.Generator:\r\n for script_node in filter(is_script_node, self.scripts_node):\r\n self.try_fix_namespace_path(script_node)\r\n\r\n script_path: str = script_node.text\r\n\r\n if script_path == os.pardir or script_path == os.curdir:\r\n PapyrusProject.log.error(f'Script path at line {script_node.sourceline} in project file is not a file path')\r\n sys.exit(1)\r\n\r\n # handle . and .. in path\r\n if startswith(script_path, os.pardir):\r\n script_path = script_path.replace(os.pardir, os.path.normpath(os.path.join(self.project_path, os.pardir)), 1)\r\n elif startswith(script_path, os.curdir):\r\n script_path = script_path.replace(os.curdir, self.project_path, 1)\r\n\r\n if os.path.isdir(script_path):\r\n PapyrusProject.log.error(f'Script path at line {script_node.sourceline} in project file is not a file path')\r\n sys.exit(1)\r\n\r\n yield os.path.normpath(script_path)",
"def process_js():\n source_paths = [\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/admin.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/app.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/footnotes.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/table_of_contents.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/text_resize.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/toastr.js'),\n ]\n dest_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.js')\n min_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.min.js')\n\n process_js_files(source_paths, dest_path, min_path)",
"def do_js(js_input_dir, js_output_dir):\n\n remove_extention('.js', js_output_dir)\n js_str = get_js(js_input_dir)\n js_name = get_cachebusting_name(js_str) + '.js'\n fu.str_to_file(js_output_dir + js_name, js_str)\n return js_name"
] | [
"0.54122865",
"0.53973025",
"0.5321434",
"0.53174424",
"0.5260792",
"0.5172825",
"0.5163565",
"0.5132076",
"0.5092393",
"0.50862974",
"0.5047552",
"0.50348",
"0.4977795",
"0.49442473",
"0.49381855",
"0.4916476",
"0.49147418",
"0.489088",
"0.4888798",
"0.4885273",
"0.48759082",
"0.4867766",
"0.4859941",
"0.48446754",
"0.47945684",
"0.47431508",
"0.4729149",
"0.472025",
"0.46664074",
"0.46552616"
] | 0.5625085 | 0 |
Inspect the Origin header to see if it matches the path. | def detect_origin(self) -> typing.Optional[str]:
origin: typing.Optional[str] = self.headers.get("Origin")
if not origin or "://" not in origin:
return None
_, origin_host = origin.split("://", 1)
if ":" in origin_host:
origin_host, _ = origin_host.split(":")
search = self.path.replace("/", "")
if search.endswith(".js"):
search = search[:-3]
if origin and self.path and origin_host == search:
return origin
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n # import re\n # bool(re.match(r'^.*?\\.mydomain\\.com', origin))\n # allowed = super.check_origin(origin)\n if self.allow_origin == '*':\n return True\n\n host = self.request.headers.get(\"Host\")\n if origin is None:\n origin = self.request.headers.get(\"Origin\")\n\n # If no header is provided, assume we can't verify origin\n if origin is None:\n LOG.warning(\"user {0} Missing Origin header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n if host is None:\n LOG.warning(\"user {0} Missing Host header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n\n origin = origin.lower()\n origin_host = urlparse(origin).netloc\n\n # OK if origin matches host\n if origin_host == host:\n return True\n\n # Check CORS headers\n if self.allow_origin:\n allow = self.allow_origin == origin\n # elif self.allow_origin_pat:\n # allow = bool(self.allow_origin_pat.match(origin))\n else:\n # No CORS headers deny the request\n allow = False\n if not allow:\n LOG.warning(\"user {0} Blocking Cross Origin WebSocket Attempt. Origin: %s, Host: %s\",\n self.client_id, origin, host)\n return allow",
"def _check_cors_headers(self, res):\r\n self.assertEqual(res.headers['access-control-allow-origin'], '*')\r\n self.assertEqual(\r\n res.headers['access-control-allow-headers'], 'X-Requested-With')",
"def _check_cors_headers(self, res):\r\n self.assertEqual(res.headers['access-control-allow-origin'], '*')\r\n self.assertEqual(\r\n res.headers['access-control-allow-headers'], 'X-Requested-With')",
"def _has_cors_header(self):\n return \"Access-Control-Request-Method\" in self.headers or \"Access-Control-Request-Headers\" in self.headers or \"Origin\" in self.headers",
"def validate_origin(self, parsed_origin):\n return any(\n pattern == \"*\" or self.match_allowed_origin(parsed_origin, pattern)\n for pattern in self.allowed_origins\n )",
"def valid_origin(self, parsed_origin):\n # None is not allowed unless all hosts are allowed\n if parsed_origin is None and \"*\" not in self.allowed_origins:\n return False\n return self.validate_origin(parsed_origin)",
"def _get_origin_path(self):\n return self.__origin_path",
"def match_allowed_origin(self, parsed_origin, pattern):\n if parsed_origin is None:\n return False\n\n # Get ResultParse object\n parsed_pattern = urlparse(pattern.lower())\n if parsed_origin.hostname is None:\n return False\n if not parsed_pattern.scheme:\n pattern_hostname = urlparse(\"//\" + pattern).hostname or pattern\n return is_same_domain(parsed_origin.hostname, pattern_hostname)\n # Get origin.port or default ports for origin or None\n origin_port = self.get_origin_port(parsed_origin)\n # Get pattern.port or default ports for pattern or None\n pattern_port = self.get_origin_port(parsed_pattern)\n # Compares hostname, scheme, ports of pattern and origin\n if (\n parsed_pattern.scheme == parsed_origin.scheme\n and origin_port == pattern_port\n and is_same_domain(parsed_origin.hostname, parsed_pattern.hostname)\n ):\n return True\n return False",
"def is_node_origin(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"_origin\"",
"def http_header_access_control_allow_origin():\n return 'Access-Control-Allow-Origin'",
"def check_origin(self):\n if self.relativize:\n name = dns.name.empty\n else:\n name = self.origin\n if self.get_rdataset(name, dns.rdatatype.SOA) is None:\n raise NoSOA\n if self.get_rdataset(name, dns.rdatatype.NS) is None:\n raise NoNS",
"def get_access_control_allow_origin(self):\n return self.access_control_allow_origin",
"def access_control_allow_origin(self) -> str:\n if self._access_control_allow_origin is None:\n access_control_allow_origin = self._get_env(\"ACCESS_CONTROL_ALLOW_ORIGIN\")\n self._access_control_allow_origin = access_control_allow_origin\n\n return self._access_control_allow_origin",
"def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False",
"def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False",
"def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)",
"def contains_origin(self):\n return self.contains(self.ambient_space().zero())",
"def GetOrigin(self):\n ...",
"def GetOrigin(self):\n ...",
"def GetOrigin(self):\n ...",
"def _is_request_in_include_path(self, request):\n if self._include_paths:\n for path in self._include_paths:\n if request.path.startswith(path):\n return True\n return False\n else:\n return True",
"def is_header_content(response, key, value):\n try:\n if response.headers[key].lower() == value:\n return True\n else:\n return False\n except:\n return False",
"def allow_origins(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"allow_origins\")",
"def same_origin(url1, url2):\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)",
"def is_remote_access_allowed(self, path: str):\n return self.public_path_marker.test(path) or self.is_public(path) and not self.is_private(path)",
"def _get_origin(self, origin):\n origins = {\n 'Directory': self._directory_origin,\n 'HTTP Client': self._http_client_origin,\n 'JDBC Multitable Consumer': self._jdbc_multitable_origin,\n 'JDBC Query Consumer': self._jdbc_query_origin,\n 'Kafka Multitopic Consumer': self._kafka_multitopic_origin,\n 'S3': self._s3_origin,\n 'SFTP Client': self._sftp_client_origin\n }\n stage = origins.get(origin)\n return stage()"
] | [
"0.6808445",
"0.6808445",
"0.6808445",
"0.6808445",
"0.64415884",
"0.6320489",
"0.6320489",
"0.6283557",
"0.62806875",
"0.6209452",
"0.60481757",
"0.60423523",
"0.5956921",
"0.5909411",
"0.5816535",
"0.5712817",
"0.5611696",
"0.5599866",
"0.5599866",
"0.5544079",
"0.5511452",
"0.5438937",
"0.5438937",
"0.5438937",
"0.5428158",
"0.5398111",
"0.5355251",
"0.53464055",
"0.53221613",
"0.53131574"
] | 0.75541246 | 0 |
Return angle, axis pair that corresponds to rotation matrix m. The case where ``m`` is the identity matrix corresponds to a singularity where any rotation axis is valid. In that case, ``Vector([1, 0, 0])`` is returned. | def m2rotaxis(m):
eps = 1e-5
# Check for singularities a la http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToAngle/
if (
abs(m[0, 1] - m[1, 0]) < eps
and abs(m[0, 2] - m[2, 0]) < eps
and abs(m[1, 2] - m[2, 1]) < eps
):
# Singularity encountered. Check if its 0 or 180 deg
if (
abs(m[0, 1] + m[1, 0]) < eps
and abs(m[0, 2] + m[2, 0]) < eps
and abs(m[1, 2] + m[2, 1]) < eps
and abs(m[0, 0] + m[1, 1] + m[2, 2] - 3) < eps
):
angle = 0
else:
angle = numpy.pi
else:
# Angle always between 0 and pi
# Sense of rotation is defined by axis orientation
t = 0.5 * (numpy.trace(m) - 1)
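# Clamp cos(angle) to [-1, 1] so numerical error cannot push arccos out of its domain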
t = max(-1, t)
t = min(1, t)
angle = numpy.arccos(t)
if angle < 1e-15:
# Angle is 0
return 0.0, Vector(1, 0, 0)
elif angle < numpy.pi:
# Angle is smaller than pi
x = m[2, 1] - m[1, 2]
y = m[0, 2] - m[2, 0]
z = m[1, 0] - m[0, 1]
axis = Vector(x, y, z)
axis.normalize()
return angle, axis
else:
# Angle is pi - special case!
m00 = m[0, 0]
m11 = m[1, 1]
m22 = m[2, 2]
if m00 > m11 and m00 > m22:
x = numpy.sqrt(m00 - m11 - m22 + 0.5)
y = m[0, 1] / (2 * x)
z = m[0, 2] / (2 * x)
elif m11 > m00 and m11 > m22:
y = numpy.sqrt(m11 - m00 - m22 + 0.5)
x = m[0, 1] / (2 * y)
z = m[1, 2] / (2 * y)
else:
z = numpy.sqrt(m22 - m00 - m11 + 0.5)
x = m[0, 2] / (2 * z)
y = m[1, 2] / (2 * z)
axis = Vector(x, y, z)
axis.normalize()
return numpy.pi, axis | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _rotate_(self, x: np.array, m: np.array) -> (np.array, np.array):\n # get a random angle\n angle = np.random.randint(0, self.rotate)\n # get a random sign for the angle\n sign = np.random.randint(0, 2)\n x = rotate(x, -sign * angle, reshape=False)\n m = rotate(m, -sign * angle, axes=(0, 1),\n mode='nearest',\n reshape=False)\n return x, m",
"def momentToMatrix(m):\n angle = vectorops.norm(m)\n axis = vectorops.div(m,angle)\n return so3.rotation(axis,angle)",
"def turnangles(M):\n Mt = M.transpose()\n x0, y0 = Mt[0], Mt[1] # arrays of all x's & y's\n x1, y1 = Mt[0][ 0], Mt[1][ 0] # start point\n x2, y2 = Mt[0][-1], Mt[1][-1] # end point\n\n vec1_x, vec1_y = x1 - x0, y1 - y0 \n vec2_x, vec2_y = x2 - x0, y2 - y0 \n vec1dot2 = vec1_x * vec2_x + vec1_y * vec2_y\n vec1_len = np.sqrt(vec1_x * vec1_x + vec1_y * vec1_y)\n vec2_len = np.sqrt(vec2_x * vec2_x + vec2_y * vec2_y)\n\n cos = vec1dot2 / np.maximum(vec1_len * vec2_len, EPS)\n cos = np.minimum(np.maximum(cos, -1.), 1.) \n turn_angles = np.pi - np.arccos(cos) \n # TODO convert [-360,360] -> [-180,180]\n # turn_angles = np.mod(turn_angles + 3*np.pi, 2.*np.pi) - np.pi \n # -2pi->0, -pi->-pi 0->0, pi->-pi 2pi->0 \n turn_angles[0], turn_angles[-1] = 0., 0. # endpoints\n return np.rad2deg(turn_angles)",
"def rotMatToAxisAngles(rotmat):\n\n yrot = np.sqrt(rotmat[0, 0] ** 2 + rotmat[1, 0] ** 2)\n\n if np.isclose(yrot, 0):\n xrot = np.arctan2(-rotmat[1, 2], rotmat[1, 1])\n yrot = np.arctan2(-rotmat[2, 0], yrot)\n zrot = 0\n else:\n xrot = np.arctan2( rotmat[2, 1], rotmat[2, 2])\n yrot = np.arctan2(-rotmat[2, 0], yrot)\n zrot = np.arctan2( rotmat[1, 0], rotmat[0, 0])\n\n return [xrot, yrot, zrot]",
"def get_rot_from_mat(m_mat):\n trans_matrix = oMa.MTransformationMatrix(m_mat)\n rot = trans_matrix.rotation()\n\n return rot",
"def orthogonalize_matrix(m):\n U, __, VT = np.linalg.svd(np.matrix(m))\n return np.dot(U, VT)",
"def orthogonal_matrix_from_angles(m, angles):\n expected_l = m * (m - 1) // 2\n angles = list(angles)\n l = len(angles)\n if not l == expected_l:\n raise ValueError(\"need {0} angles to compute orthogonal \"\n \"({1}, {1})-matrix\".format(expected_l, m))\n\n matrix = 1.0\n for n in range(1, m):\n small_t = numpy.zeros((n + 1, n + 1))\n small_s = numpy.zeros((n + 1, n + 1))\n\n small_t[:n, :n] = matrix\n small_t[n, n] = 1.0\n\n matrix = numpy.zeros((n + 1, n + 1))\n small_s[0, n] = -1.0\n for k in range(n):\n gamma = angles.pop()\n cg, sg = numpy.cos(gamma), numpy.sin(gamma)\n matrix[k] = small_t[k] * cg - small_s[k] * sg\n small_s[k + 1] = small_t[k] * sg + small_s[k] * cg\n\n matrix[n] = -small_s[n]\n\n return matrix",
"def rotation_axis_matrix(phi: numbers.Real, axis: int):\n\n if axis == 0:\n return [[1, 0, 0, 0],\n [0, cos(phi), sin(phi), 0],\n [0, sin(phi), cos(phi), 0],\n [0, 0, 0, 1]]\n elif axis == 1:\n return [[cos(phi), 0, sin(phi), 0],\n [0, 1, 0, 0],\n [-sin(phi), 0, cos(phi), 0],\n [0, 0, 0, 1]]\n elif axis == 2:\n return [[cos(phi), -sin(phi), 0, 0],\n [sin(phi), cos(phi), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]\n else:\n raise ValueError(\"only 3d space coordinates as homogeneous vectors are supported\")",
"def test_angle_between_vectors():\n v = np.array([1, 0, 0])\n a = np.array([0, 1, 0, np.pi / 2])\n R = pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])\n v = np.array([0, 1, 0])\n a = np.array([1, 0, 0, np.pi / 2])\n R = pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])\n v = np.array([0, 0, 1])\n a = np.array([1, 0, 0, np.pi / 2])\n R = pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])",
"def _rotation_from_gradient(self,m):\n\t\ttheta = -np.arctan(m)\n\t\tself.current_theta = theta\n\t\treturn self._rotation_from_angle(theta)",
"def matrix(self):\n return self._rotation",
"def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle",
"def to_axisangle(self) -> Tuple[np.ndarray, float]:\n angle = np.arccos((self.A.trace()-1)/2)\n axis = np.zeros(3)\n if angle!=0:\n axis = np.array([self.A[2, 1]-self.A[1, 2], self.A[0, 2]-self.A[2, 0], self.A[1, 0]-self.A[0, 1]])/(2*np.sin(angle))\n return axis, angle",
"def get_rot(m_obj):\n mfn_obj = oMa.MFnTransform(m_obj)\n\n rot = mfn_obj.rotation()\n\n return rot",
"def rotationMatrixToEulerAngles(R) :\n sy = np.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n\n if not singular :\n x = np.arctan2(R[2,1] , R[2,2])\n y = np.arctan2(-R[2,0], sy)\n z = np.arctan2(R[1,0], R[0,0])\n else :\n x = np.arctan2(-R[1,2], R[1,1])\n y = np.arctan2(-R[2,0], sy)\n z = 0\n\n return np.array([x, y, z])",
"def rotaxis2m(theta, vector):\n vector = vector.normalized()\n c = numpy.cos(theta)\n s = numpy.sin(theta)\n t = 1 - c\n x, y, z = vector.get_array()\n rot = numpy.zeros((3, 3))\n # 1st row\n rot[0, 0] = t * x * x + c\n rot[0, 1] = t * x * y - s * z\n rot[0, 2] = t * x * z + s * y\n # 2nd row\n rot[1, 0] = t * x * y + s * z\n rot[1, 1] = t * y * y + c\n rot[1, 2] = t * y * z - s * x\n # 3rd row\n rot[2, 0] = t * x * z - s * y\n rot[2, 1] = t * y * z + s * x\n rot[2, 2] = t * z * z + c\n return rot",
"def angles_from_orthogonal_matrix(matrix):\n n, m = matrix.shape\n if not ((n == m) and numpy.allclose(linalg.det(matrix), 1.0)):\n raise ValueError(\"'matrix' not orthogonal\")\n l = n * (n - 1) // 2\n angles = numpy.zeros(l)\n\n def error(angles):\n eps = matrix - orthogonal_matrix_from_angles(n, angles)\n err_sq = numpy.sum(eps * eps)\n return err_sq\n\n # Find the angles that minimize the squared error\n bounds = [(0.0, 2 * numpy.pi)] * l\n angles = optimize.minimize(error, angles, bounds=bounds).x\n\n return angles",
"def homog_rot_mtx(angle_rads: float, axis: str) -> numpy.array:\n cosang = numpy.cos(angle_rads)\n sinang = numpy.sin(angle_rads)\n\n if \"z\" == axis:\n return numpy.array(\n (\n (cosang, -sinang, 0, 0),\n (sinang, cosang, 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n elif \"y\" == axis:\n return numpy.array(\n (\n (cosang, 0, sinang, 0),\n (0, 1, 0, 0),\n (-sinang, 0, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n else:\n return numpy.array(\n (\n (1, 0, 0, 0),\n (0, cosang, -sinang, 0),\n (0, sinang, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )",
"def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation",
"def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]",
"def py_rotation_from_matrix(matrix):\n return np.float32(quat2angle_axis(mat2quat(matrix)))",
"def axis2rotmat(axis):\n return quat2rotmat(axis2quat(axis))",
"def _r90(self,m):\n\n return np.rot90(m,1)",
"def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec",
"def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec",
"def _rotation_matrix_to_euler_angles(self, R):\n assert (self._is_rotation_matrix(R))\n\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])",
"def _rotation_matrix_to_euler_angles(R):\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])",
"def _r180(self,m):\n return np.rot90(m,2)",
"def rotation_matrix(phi):\n return np.asmatrix([\n [np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]\n ])",
"def test_conversions_matrix_axis_angle():\n R = np.eye(3)\n a = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, np.array([1, 0, 0, 0]))\n\n R = pr.active_matrix_from_intrinsic_euler_xyz(\n np.array([-np.pi, -np.pi, 0.0]))\n a = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, np.array([0, 0, 1, np.pi]))\n\n R = pr.active_matrix_from_intrinsic_euler_xyz(\n np.array([-np.pi, 0.0, -np.pi]))\n a = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, np.array([0, 1, 0, np.pi]))\n\n R = pr.active_matrix_from_intrinsic_euler_xyz(\n np.array([0.0, -np.pi, -np.pi]))\n a = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, np.array([1, 0, 0, np.pi]))\n\n a = np.array([np.sqrt(0.5), np.sqrt(0.5), 0.0, np.pi])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a2, a)\n\n random_state = np.random.RandomState(0)\n for _ in range(50):\n a = pr.random_axis_angle(random_state)\n R = pr.matrix_from_axis_angle(a)\n pr.assert_rotation_matrix(R)\n\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)\n\n R2 = pr.matrix_from_axis_angle(a2)\n assert_array_almost_equal(R, R2)\n pr.assert_rotation_matrix(R2)"
] | [
"0.663678",
"0.62896967",
"0.6157976",
"0.6006294",
"0.59815663",
"0.5924986",
"0.5909249",
"0.58380806",
"0.58301306",
"0.5824258",
"0.57651687",
"0.57651037",
"0.5755777",
"0.57331914",
"0.57147455",
"0.5714623",
"0.5711759",
"0.57107013",
"0.5710682",
"0.5695341",
"0.5695",
"0.56422335",
"0.5637957",
"0.56274974",
"0.56274974",
"0.5622773",
"0.5620622",
"0.5583154",
"0.55766547",
"0.5563162"
] | 0.8007706 | 0 |
Vector to axis method. Return the vector between a point and the closest point on a line (i.e. the perpendicular projection of the point on the line). | def vector_to_axis(line, point):
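# Subtract from point its projection onto the unit line vector (** with a scalar is Vector scaling here), leaving the perpendicular component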
line = line.normalized()
np = point.norm()
angle = line.angle(point)
return point - line ** (np * numpy.cos(angle)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm",
"def project_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n\n return add_vectors(a, c)",
"def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)",
"def perpendicular_axis(vec):\n axis = vec.rotate(-math.pi / 2) # rotate vector -90 degrees\n axis = axis.norm() # turn axis vector into unit vector\n return axis",
"def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b",
"def find_perpendicular_vector(vt):\n x, y = vt\n return np.array([y, -x])",
"def projectPoint(self, point):\n vector = self.normal_vector\n angle = vector.angle\n line = Line(point, angle, correct=False)\n projection = self.crossLine(line)\n return projection",
"def dist_to_line2d(line, point):\n\tx1,y1 = line[0]\n\tx2,y2 = line[1]\n\tx3,y3 = point\n\t\n\t# where on line the perpendicular is\n\tu = ( ((x3-x1)*(x2-x1) + (y3-y1)*(y2-y1))\n\t\t\t/ (math.pow(x1-x2,2) + math.pow(y1-y2,2)) )\n\t\n\t# intersection point\n\tx = x1 + u*(x2-x1)\n\ty = y1 + u*(y2-y1)\n\t\n\tdist = math.sqrt(math.pow(x-x3,2)+math.pow(y-y3,2))\n\t\n\treturn dist",
"def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d",
"def perpendicularTo(self, vector):\n perpendicular = self.subtractVector(self.parallelTo(vector))\n return perpendicular",
"def intersect_line(self, line: Line, **kwargs) -> Point:\n if self.normal.is_perpendicular(line.direction, **kwargs):\n raise ValueError(\"The line and plane must not be parallel.\")\n\n vector_plane_line = Vector.from_points(self.point, line.point)\n\n num = -self.normal.dot(vector_plane_line)\n denom = self.normal.dot(line.direction)\n\n # Vector along the line to the intersection point.\n vector_line_scaled = num / denom * line.direction\n\n return line.point + vector_line_scaled",
"def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()",
"def project_point_to_line(point, line_start, line_end):\n line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])",
"def project_line(self, line: Line, **kwargs: float) -> Line:\n if self.normal.is_parallel(line.vector, **kwargs):\n raise ValueError(\"The line and plane must not be perpendicular.\")\n\n point_projected = self.project_point(line.point)\n\n if self.normal.is_perpendicular(line.vector, **kwargs):\n return Line(point_projected, line.vector)\n\n vector_projected = self.project_vector(line.vector)\n\n return Line(point_projected, vector_projected)",
"def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)",
"def get_projection_of_pt_on_line(point, line_point1, line_point2):\n projection = Point(-1, -1)\n projection.x = point.x\n if (line_point2.x - line_point1.x) != 0:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / \\\n (line_point2.x - line_point1.x) + line_point1.y\n else:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / 1 + line_point1.y\n return projection",
"def project_point(self, point: array_like) -> Point:\n # Vector from the point in space to the point on the plane.\n vector_to_plane = Vector.from_points(point, self.point)\n\n # Perpendicular vector from the point in space to the plane.\n vector_projected = self.normal.project_vector(vector_to_plane)\n\n return Point(point) + vector_projected",
"def get_perpendicular2d(vector):\n if vector[1] == 0:\n return np.asarray([0.,1.])\n v2_0 = 1.0\n v2_1 = -(vector[0]/vector[1])\n v2 = np.asarray([v2_0, v2_1])\n return v2 / np.linalg.norm(v2)",
"def perpendicular(self):\n return tuple.__new__(Vec2, (-self[1], self[0]))",
"def project_vector(self, vector: array_like) -> Vector:\n point_in_space = self.point + vector\n point_on_plane = self.project_point(point_in_space)\n\n return Vector.from_points(self.point, point_on_plane)",
"def _to_xy_coordinates(unit_vector_angle, point):\n angle_orthogonal = unit_vector_angle + pi / 2\n return point[0] * cos(unit_vector_angle) + point[1] * cos(angle_orthogonal), \\\n point[0] * sin(unit_vector_angle) + point[1] * sin(angle_orthogonal)",
"def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)",
"def vector_line(self):\n assert len(self.xcoords) == 2\n diff_x = self.xcoords[1] - self.xcoords[0]\n diff_z = self.zcoords[1] - self.zcoords[0]\n vec = np.hstack((diff_x, diff_z))\n return vec",
"def point_to_line_signed(p: Vec2, p0: Vec2, p1: Vec2):\n return cross(norm(nor_vector(p0, p1)), dir_vector(p, p0))",
"def vertical_projection(self, tangent_vec, base_point, **kwargs):\n caller_name = sys._getframe().f_back.f_code.co_name\n if caller_name == \"horizontal_projection\":\n raise NotImplementedError\n\n return tangent_vec - self.horizontal_projection(tangent_vec, base_point)",
"def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)",
"def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab",
"def Perpendicular(self, line: Line, point: Point, interesting=True) -> Line:\n if point in line:\n return self.ErectPerpendicular(line, point, interesting=interesting)\n else:\n return self.DropPerpendicular(line, point, interesting=interesting)",
"def line_to_vec(line, unitize=False):\r\n vec = rs.VectorCreate(rs.CurveEndPoint(line),rs.CurveStartPoint(line))\r\n if unitize is True:\r\n vec = rs.VectorUnitize(vec)\r\n return vec",
"def get_vertical_line(self, point: Sequence[float], **kwargs) -> Line:\n return self.get_line_from_axis_to_point(0, point, **kwargs)"
] | [
"0.7509544",
"0.7091944",
"0.701979",
"0.6911473",
"0.68514013",
"0.67952347",
"0.6784686",
"0.66677094",
"0.65612835",
"0.64763176",
"0.64715296",
"0.64650273",
"0.6458402",
"0.6456651",
"0.6448089",
"0.642346",
"0.6413407",
"0.6406169",
"0.63755524",
"0.6344475",
"0.62969303",
"0.627018",
"0.6259338",
"0.6246161",
"0.6244741",
"0.6228847",
"0.6220047",
"0.621737",
"0.6180329",
"0.61788076"
] | 0.8516493 | 0 |
Return a (left multiplying) matrix that rotates p onto q. | def rotmat(p, q):
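# Compose two reflections: refmat(p, -p) maps p to -p, then refmat(q, -p) maps -p onto q, so the product is a proper rotation taking p onto q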
rot = numpy.dot(refmat(q, -p), refmat(p, -p))
return rot | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quat_rotate(X, q):\n # repeat q along 2nd dim\n ones_x = X[[0], :, :][:, :, [0]] * 0 + 1\n q = torch.unsqueeze(q, 1) * ones_x\n\n q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)\n\n X_rot = hamilton_product(q, hamilton_product(X, q_conj))\n return X_rot[:, :, 1:4]",
"def quat_rotate(X, q):\n # repeat q along 2nd dim\n ones_x = X[[0], :, :][:, :, [0]] * 0 + 1\n q = torch.unsqueeze(q, 1) * ones_x\n\n q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)\n\n X_rot = hamilton_product(q, hamilton_product(X, q_conj))\n return X_rot[:, :, 1:4]",
"def quaterion_product(q, p):\n q0 = q[3]\n p0 = p[3]\n\n return [q0*p[0:3] + p0*q[0:3] + mtimes(skew(q[0:3]), p[0:3]), q0*p0 - mtimes(q[0:3].T, p[0:3])]",
"def make_matrix(p, q):\n M = [[ele[0] * ele[1] for ele in itertools.product([player, 1 - player], \n [opponent, 1 - opponent])]\n for opponent in q for player in p]\n return np.array(M)",
"def rotate(points, q):\n # Rotation is achieved by a quaternion sandwich: q * p * (q^-1)\n # This implementation computes the two quaternion products one after the other\n res = np.zeros((len(points), 4))\n # q*p\n res[:, 0] = -np.sum(q[1:]*points, axis=1)\n res[:, 1:] = q[0]*points + np.cross(q[1:], points)\n # (q*p) * q^-1\n res2 = np.zeros((len(points), 4))\n res2[:, 0] = res[:, 0]*q[0] - np.dot(res[:, 1:].copy(), -q[1:].copy())\n res2[:, 1:] = res[:, 0].copy().reshape((-1, 1))*(-q[1:]) + (q[0]*res[:, 1:]) + np.cross(res[:, 1:], -q[1:])\n return res2[:, 1:]",
"def quatLeftMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tL = np.zeros((4, 4))\n\tL[0, 0] = s\n\tL[0, 1:] = -v\n\tL[1:, 0] = v\n\tL[1:, 1:] = s*np.eye(3) + skewMat(v)\n\treturn L",
"def qrot(q, v):\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n original_shape = v.shape\n q = q.view(-1, 4)\n v = v.view(-1, 3)\n\n qvec = q[:, 1:]\n uv = torch.cross(qvec, v, dim=1)\n uuv = torch.cross(qvec, uv, dim=1)\n return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)",
"def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. * (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. - 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. * (qi * qi + qj * qj)\n\n return R",
"def qrot(q, v):\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n qvec = q[..., 1:]\n uv = torch.cross(qvec.double(), v.double(), dim=len(q.shape) - 1)\n uuv = torch.cross(qvec.double(), uv.double(), dim=len(q.shape) - 1)\n return v + 2 * (q[..., :1] * uv + uuv)",
"def quatPassiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q).T @ quatRightMat(q) @ v_q\n\treturn v_qnew[1:]",
"def cross(p, q):\n xyz = np.zeros(3)\n xyz[0] = p[1] * q[2] - p[2] * q[1]\n xyz[1] = p[2] * q[0] - p[0] * q[2]\n xyz[2] = p[0] * q[1] - p[1] * q[0]\n return xyz",
"def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate",
"def rotate(q, v):\n if v.ndim == 1:\n qv = np.append(v,0)\n else:\n qv = np.hstack([v,np.zeros((len(v),1))])\n out = mult(q,qv)\n out = mult(out, inv(q))\n return out[:,:3]",
"def quatRightMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tR = np.zeros((4, 4))\n\tR[0, 0] = s\n\tR[0, 1:] = -v\n\tR[1:, 0] = v\n\tR[1:, 1:] = s*np.eye(3) - skewMat(v)\n\treturn R",
"def quatActiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q) @ quatRightMat(q).T @ v_q\n\treturn v_qnew[1:]",
"def mult(p, q):\n if p.ndim == 1 and q.ndim > 1:\n p = np.tile(p,(q.shape[0],1))\n if q.ndim == 1 and p.ndim > 1:\n q = np.tile(q,(p.shape[0],1))\n if q.ndim == 1 and p.ndim == 1:\n p = p.reshape((1,4))\n q = q.reshape((1,4))\n\n ps = p[:,3]\n qs = q[:,3]\n pv = p[:,:3]\n qv = q[:,:3]\n\n pq = np.empty_like(p)\n pq[:,3] = ps * qs \n pq[:,3] -= arraylist_dot(pv, qv).flatten()\n pq[:,:3] = ps[:,np.newaxis] * qv \n pq[:,:3] += pv * qs[:,np.newaxis] \n pq[:,:3] += np.cross(pv , qv)\n\n #opposite sign due to different convention on the basis vectors\n #pq *= -1\n return pq",
"def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]",
"def get_rotation_matrix_from_quaternion(q):\n R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3],\n 2 * (q[1] * q[2] - q[0] * q[3]),\n 2 * (q[1] * q[3] + q[0] * q[2])],\n [2 * (q[2] * q[1] + q[0] * q[3]),\n q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3],\n 2 * (q[2] * q[3] - q[0] * q[1])],\n [2 * (q[3] * q[1] - q[0] * q[2]),\n 2 * (q[3] * q[2] + q[0] * q[1]),\n q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])\n return R",
"def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = angles[0:3]\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, kappa, omega) = self.get_phi_kappa_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.kappa_opposite_rotation_matrix(phi, np.deg2rad(self.alpha), kappa, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def refmat(p, q):\n p = p.normalized()\n q = q.normalized()\n if (p - q).norm() < 1e-5:\n return numpy.identity(3)\n pq = p - q\n pq.normalize()\n b = pq.get_array()\n b.shape = (3, 1)\n i = numpy.identity(3)\n ref = i - 2 * numpy.dot(b, numpy.transpose(b))\n return ref",
"def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. - xx2 - yy2\n\n return rmat",
"def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )"
] | [
"0.7235314",
"0.7235314",
"0.6990883",
"0.69877553",
"0.69679874",
"0.6881361",
"0.6804596",
"0.6750322",
"0.6703256",
"0.6547457",
"0.6546621",
"0.6531093",
"0.6472698",
"0.63746524",
"0.6359099",
"0.6341709",
"0.63394356",
"0.6274029",
"0.624419",
"0.6177997",
"0.61512566",
"0.6147087",
"0.6147087",
"0.6143461",
"0.6143461",
"0.61423063",
"0.61363983",
"0.6134263",
"0.61064863",
"0.6082871"
] | 0.8370842 | 0 |
Calculate angle method. Calculate the angle between 3 vectors representing 3 connected points. | def calc_angle(v1, v2, v3):
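# Shift v1 and v3 so that v2 becomes the vertex, then take the angle between the two edge vectors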
v1 = v1 - v2
v3 = v3 - v2
return v1.angle(v3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calcul_angle(point1, point2, point3):\n \n x1,y1,z1=point1\n x2,y2,z2=point2\n x3,y3,z3=point3\n \n vec1=[x1-x2, y1-y2, z1-z2]\n vec2=[x3-x2, y3-y2, z3-z2]\n\n return calcul_angle_vector(vec1, vec2)",
"def get_angle(pt1,pt2,pt3):\r\n a = float(get_distance(pt1,pt2))\r\n b = float(get_distance(pt2,pt3))\r\n c = float(get_distance(pt1,pt3))\r\n angle = np.arccos((a**2 + b**2 - c**2)/(2*a*b)) # Law of Cosines \r\n \r\n return angle",
"def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)",
"def getAngle(p1, p2, p3):\n\tv1 = p1 - p2\n\tv2 = p3 - p2\n\tmag = la.norm(v1) * la.norm(v2)\n\tc = np.dot(v1, v2) / mag\n\tcross = np.cross(v1,v2)\n\ts = la.norm(cross)/mag\n\tatang = math.atan2(s,c)\n\tang = atang * 180 / math.pi\n\treturn ang",
"def angle( nt1, nt2, nt3 ):\n if vector(nt1, nt2) == [0,0]:\n print(\"nt1\", nt1.seqpos, \" at \", nt1.x, nt1.y, \" is at the same position as nt2\", nt2.seqpos)\n if vector(nt2, nt3) == [0,0]:\n print(\"nt2\", nt2.seqpos, \" at \", nt2.x, nt2.y, \" is at the same position as nt3\", nt3.seqpos)\n #print(vector(nt1, nt2), vector(nt2, nt3))\n if vectors_close(vector(nt1, nt2), vector(nt2, nt3)):\n # These vectors are identical and that is messing with the ability to call two things parallel?\n return 180.0\n return 180.0 - math.degrees(math.acos(dot(vector(nt1, nt2), vector(nt2, nt3)) / (mod(vector(nt1, nt2)) * mod(vector(nt2, nt3)))))",
"def compute_angle_v2v(v1, v2, v3=None):\n\n alpha = math.acos(dot_product(v1, v2) / (vlength(v1)*vlength(v2)))\n if v3 is not None:\n cross = cross_product(v2, v1)\n if dot_product(cross,v3) > 0.0:\n return 2*math.pi-alpha\n\n return alpha",
"def _angle(self, a, b, c):\n divid = (a ** 2 + b ** 2 - c ** 2)\n divis = (2 * a * b)\n if (divis) > 0:\n result = float(divid) / divis\n if result <= 1.0 and result >= -1.0:\n return acos(result)\n return 0\n else:\n return 0",
"def angle(*args):\n if len(args) < 1:\n return 0.0\n elif len(args) == 1:\n return np.arctan2(args[0][1], args[0][0])\n else:\n v1 = args[0].flatten()\n v2 = args[1].flatten()\n return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))",
"def get_angle_and_distance(point_1, point_2, point_3):\n if point_1 == point_2 or point_1 == point_3 or point_2 == point_3:\n return 0, 0\n \n v1 = Vector2D(point_2, point_1)\n v2 = Vector2D(point_2, point_3)\n \n cosalpha = Vector2D.dot(v1, v2) / (v1.norm() * v2.norm())\n\n return (acos(cosalpha), v1.norm())",
"def angle_to( self, vector3 ):\n # make sure neither vector is zero-length\n sm = self.magnitude\n vm = vector3.magnitude\n if abs(sm) < self.EPSILON or abs(vm) < self.EPSILON:\n raise ZeroDivisionError(\n \"can't calculate angle between zero-length vectors!\" )\n \n # calculation will fail if vectors have same heading\n # catch error and return zero\n try:\n return math.degrees( math.acos(self.dot(vector3) / (sm * vm)) )\n except ValueError:\n # test whether direction is same or opposite\n if Vector3( self ).add( vector3 ).magnitude < sm:\n return 180.0\n return 0.0",
"def angles_points(a, b, c):\n u = subtract_vectors(b, a)\n v = subtract_vectors(c, a)\n return angles_vectors(u, v)",
"def angle3pt(\n ax: float, ay: float, bx: float, by: float, cx: float, cy: float\n ) -> float:\n ang = math.degrees(math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx))\n return ang + 360 if ang < 0 else ang",
"def angle_between_ll(ll1, ll2, ll3):\n return angle_between_points(ll2xy(*ll1)[0:2], ll2xy(*ll2)[0:2], ll2xy(*ll3)[0:2])",
"def _angle(*vectors):\n if len(vectors) == 1:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[0][1], vectors[0][0]))\n elif len(vectors) == 2:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[1][1], vectors[1][0]) - np.arctan2(vectors[0][1], vectors[0][0]))\n else:\n raise AttributeError()",
"def angle_between_points(a, b, c):\n ax, ay = a\n bx, by = b\n cx, cy = c\n\n return angle_between([ax - bx, ay - by], [cx - bx, cy - by])",
"def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)",
"def angles_points_degrees(a, b, c):\n return degrees(angles_points(a, b, c))",
"def test_vectors_angle(self):\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")",
"def compute_angle(self, a, b, c):\n\n ba = a - b\n bc = c - b\n\n cosine_angle = np.dot(ba, bc) / \\\n (np.linalg.norm(ba) * np.linalg.norm(bc))\n\n # because of precision issues, sometimes cosine_angle is something linke -1.000000001\n # we make sure we only pass the correct arguments to np.arccos()\n if cosine_angle > 1:\n cosine_angle = 1\n elif cosine_angle < -1:\n cosine_angle = -1\n\n angle = np.arccos(cosine_angle)\n\n return np.degrees(angle)",
"def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))",
"def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))",
"def angle(z):",
"def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle",
"def angle_vecs(vec1,vec2):\n angle=np.arccos(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))\n return angle",
"def get_angle(a, b, c):\n\n # Law of cosines:\n # C = acos((a^2 + b^2 - c^2) / (2ab))\n return math.acos((a * a + b * b - c * c) / (2 * a * b))",
"def _angle(a, b, c, angle='A'):\n if angle == 'A':\n _a = b\n _b = c\n _c = a\n elif angle == 'B':\n _a = a\n _b = c\n _c = b\n else:\n _a = a\n _b = b\n _c = c\n\n cos_angle = (_a**2 + _b**2 - _c**2) / (2 * _a * _b)\n return np.arccos(np.fabs(cos_angle)) * 180.0 / np.pi",
"def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal",
"def compute_angle(a: [float], b: [float], c: [float]) -> float:\n ba = a - b\n bc = c - b\n\n cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))\n angle = np.arccos(np.abs(cosine_angle))\n\n return angle",
"def angle_between_vectors(a, b):\n return math.acos(dot_product(a, b) / (length(a) * length(b)))",
"def get_angle(a, b, c):\n\n ba = a - b\n cb = c - b\n\n ba_mod = mod(ba)\n cb_mod = mod(cb)\n val = dot(ba, cb) / (ba_mod * cb_mod)\n # better fix?\n if val > 1:\n val = 1\n elif val < -1:\n val = -1\n\n return np.arccos(val)"
] | [
"0.7981472",
"0.7753118",
"0.76024896",
"0.7592898",
"0.7543423",
"0.74118024",
"0.73399997",
"0.7231473",
"0.7139338",
"0.7095912",
"0.70290226",
"0.6875393",
"0.68628615",
"0.6811979",
"0.6781552",
"0.67007625",
"0.66819406",
"0.66541386",
"0.6647638",
"0.6644245",
"0.6641563",
"0.6636769",
"0.66342133",
"0.6632891",
"0.6630977",
"0.6628061",
"0.66231483",
"0.6621044",
"0.66151154",
"0.6602678"
] | 0.79181033 | 1 |
Calculate dihedral angle method. Calculate the dihedral angle between 4 vectors representing 4 connected points. The angle is in ]-pi, pi]. | def calc_dihedral(v1, v2, v3, v4):
ab = v1 - v2
cb = v3 - v2
db = v4 - v3
u = ab ** cb
v = db ** cb
w = u ** v
angle = u.angle(v)
# Determine sign of angle
try:
if cb.angle(w) > 0.001:
angle = -angle
except ZeroDivisionError:
# dihedral=pi
pass
return angle | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal",
"def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)",
"def calculate_dihedral_angles(mol, dihedral_atom_sets):\n\n # Create list for the dihedrals (to be ordered in the same order as the input dihedral sets)\n dihedral_angles = []\n # Now calculate the dihedral angles between the sets identified previously\n conf = mol.GetConformer()\n # Loop through the angles => 2-3 is the rotatable bonds, 1,4 are the neighbours of 2,3 respectively\n for at1, at2, at3, at4 in dihedral_atom_sets:\n # Get the coordinates of the positions\n pos1 = conf.GetAtomPosition(at1)\n pos2 = conf.GetAtomPosition(at2)\n pos3 = conf.GetAtomPosition(at3)\n pos4 = conf.GetAtomPosition(at4)\n # Need to calculate three vectors 1->2, 2->3, 3->4\n vec1 = pos2 - pos1\n vec2 = pos3 - pos2\n vec3 = pos4 - pos3\n # Get the normals to the two planes (vec1-vec2 plane and vec2-vec3 plane))\n cross12 = vec1.CrossProduct(vec2)\n cross23 = vec2.CrossProduct(vec3)\n # Normalise the normals\n cross12.Normalize()\n cross23.Normalize()\n # Calculate dot-product and then inverse cosine to get the angle\n dot_prod = cross12.DotProduct(cross23)\n dihedral_rad = math.acos(dot_prod)\n dihedral_deg = 180*dihedral_rad/math.pi\n dihedral_angles.append(dihedral_deg)\n return dihedral_angles",
"def get_dihedral_angles(self):\n mol = self.m\n c1 = mol.GetConformer(-1)\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n dic = {}\n for match in matches:\n j = match[0]\n k = match[1]\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = ( hj not in [2,3] )\n iok2 = ( hk not in [2,3] )\n if iok1 or iok2: continue\n for b1 in aj.GetBonds():\n if (b1.GetIdx() == bond.GetIdx()):\n continue\n i = b1.GetOtherAtomIdx(j)\n for b2 in ak.GetBonds():\n if (b2.GetIdx() == bond.GetIdx()) or (b2.GetIdx() == b1.GetIdx()):\n continue\n l = b2.GetOtherAtomIdx(k)\n # skip 3-membered rings\n if (l == i):\n continue\n _dang = rdMolTransforms.GetDihedralDeg(c1, i,j,k,l)\n dang = abs(_dang)\n assert dang <= 180.0\n ias4 = (i,j,k,l)\n if not self.wH:\n if np.any([ self.zs[iaa]==1 for iaa in ias4 ]):\n continue\n if self.key in ['z']:\n #print('atsi=',ias4, 'zsi=', [_zs[iaa] for iaa in ias4])\n zi,zj,zk,zl = [ self.zs[iaa] for iaa in ias4 ]\n if (zj==zk and zi>zl) or (zj>zk):\n ias4 = (l,k,j,i)\n #torsions.append(ias4)\n #_zi,_zj,_zk,_zl = [ zs[_] for _ in ias4 ]\n #typez = '%d-%d-%d-%d'%(_zi,_zj,_zk,_zl)\n type4 = tuple([self.zs[iaa] for iaa in ias4])\n if type4 in list(dic.keys()):\n dic[type4] += [dang]\n else:\n dic[type4] = [dang]\n elif self.key in ['ia','i']:\n type4 = ias4\n dic[type4] = dang\n else:\n raise Exception('#unknown key')\n return dic",
"def _angle(u, v, w, d='+'):\n vu = np.arctan2(u[1] - v[1], u[0] - v[0])\n vw = np.arctan2(w[1] - v[1], w[0] - v[0])\n phi = vw - vu\n if phi < 0:\n phi += 2 * np.pi\n if d == '-':\n phi = 2 * np.pi - phi\n return np.round(phi, 6)",
"def sp2_dihedrals(atoms):\n\n #problems with atoms inbuilt dihedral method (doesn't match gaussview/jmol at all)\n #so we'll use one taken from http://stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python\n def get_dihedral(p):\n b = p[:-1] - p[1:]\n b[0] *= -1\n v = np.array([v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]]])\n # Normalize vectors\n v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1)\n b1 = b[1] / np.linalg.norm(b[1])\n x = np.dot(v[0], v[1])\n m = np.cross(v[0], b1)\n y = np.dot(m, v[1])\n return np.degrees(np.arctan2(y, x))\n\n mol = to_molmod(atoms)\n data = []\n\n for i in range(len(atoms)):\n if len(mol.graph.neighbors[i]) == 3:\n atom_indices = [i] + list(mol.graph.neighbors[i])\n atom_positions = np.array([atoms[temp_index].position for temp_index in atom_indices])\n #dihedral = atoms.get_dihedral(atom_indices)\n dihedral = get_dihedral(atom_positions)\n result = (i, dihedral)\n data.append(result)\n\n return data",
"def calc_dihedrals(points):\n\tpiter = iter(points)\n\n\tcalculator = dihedral_calculator()\n\tcalculator.send(None)\n\n\tfor i in range(3):\n\t\tcalculator.send(next(piter))\n\n\tfor point in piter:\n\t\tyield calculator.send(point)",
"def _find_dihedral(selected):\n atom_name = lambda atom: atom.fullName()\n atom_mass = lambda atom: atom.mass()\n # Loop over possible nearest neighbors\n for a2 in selected:\n # Find the new atom\n attached_to_a2 = sorted([a for a in a2.bondedTo() \\\n if a not in selected], key=atom_name)\n for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):\n # Find the third atom\n attached_to_a3 = sorted([a for a in a2.bondedTo() \\\n if (a in selected) and (a!=a1)], key=atom_name)\n for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):\n # Find the last atom\n attached_to_a4 = sorted([a for a in a3.bondedTo() \\\n if (a in selected) and (a!=a2)], key=atom_name)\n for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):\n return (a1, a2, a3, a4)\n print 'Selected atoms:', selected\n raise Exception('No new dihedral angle found!')",
"def getDihedrals(self):\n try:\n return self._dihedralList\n except AttributeError:\n pass\n forceConstant=self._raw_data[\"DIHEDRAL_FORCE_CONSTANT\"]\n phase=self._raw_data[\"DIHEDRAL_PHASE\"]\n periodicity=self._raw_data[\"DIHEDRAL_PERIODICITY\"]\n dihedralPointers = self._raw_data[\"DIHEDRALS_INC_HYDROGEN\"] \\\n +self._raw_data[\"DIHEDRALS_WITHOUT_HYDROGEN\"]\n self._dihedralList=[]\n forceConstConversionFactor = (units.kilocalorie_per_mole).conversion_factor_to(units.kilojoule_per_mole)\n for ii in range(0,len(dihedralPointers),5):\n if int(dihedralPointers[ii])<0 or int(dihedralPointers[ii+1])<0:\n raise Exception(\"Found negative dihedral atom pointers %s\"\n % ((dihedralPointers[ii],\n dihedralPointers[ii+1],\n dihedralPointers[ii+2],\n dihedralPointers[ii+3]),))\n iType=int(dihedralPointers[ii+4])-1\n self._dihedralList.append((int(dihedralPointers[ii])//3,\n int(dihedralPointers[ii+1])//3,\n abs(int(dihedralPointers[ii+2]))//3,\n abs(int(dihedralPointers[ii+3]))//3,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(phase[iType]),\n int(0.5+float(periodicity[iType]))))\n return self._dihedralList",
"def test_dihedrals(pose):\n for i in range(1, pose.total_residue()+1):\n\n print \"\\n\"+str(pose.pdb_info.pose2pdb(i))\n try:\n print \"Phi: \"+repr(math.degrees(pose.phi(i)))\n print \"Psi: \"+repr(math.degrees(pose.psi(i)))\n print \"Omega:\"+repr(math.degrees(pose.omega(i)))\n except Exception:\n \"Print could not get dihedral for resnum \"+repr(i)\n\n return True",
"def calculate_theta_4(joint_rotations, theta0_4):\n # R0_3\n theta_1 = joint_rotations[0]\n theta_2 = joint_rotations[1]\n theta_3 = joint_rotations[2]\n # R0_4\n R0_4a = np.dot(rot_z(theta_1), rot_x(90))\n R0_4 = np.dot(R0_4a, rot_z(theta0_4))\n R0_1 = np.dot(rot_x(90), rot_y(theta_1))\n R1_2 = rot_z(theta_2)\n R2_3 = rot_z(theta_3)\n R0_2 = np.dot(R0_1, R1_2)\n R0_3 = np.dot(R0_2, R2_3)\n # R3_4\n R3_4 = np.dot(np.transpose(R0_3), R0_4)\n # theta_4\n theta_4 = np.degrees(np.arcsin(R3_4[1, 0]))\n return theta_4",
"def getDihedrals(self):\n uniqKpList = self.getFlagData('DIHEDRAL_FORCE_CONSTANT')\n uniqPeriodList = self.getFlagData('DIHEDRAL_PERIODICITY')\n uniqPhaseList = self.getFlagData('DIHEDRAL_PHASE')\n # for list below, true atom number = abs(index)/3 + 1\n dihCodeHList = self.getFlagData('DIHEDRALS_INC_HYDROGEN')\n dihCodeNonHList = self.getFlagData('DIHEDRALS_WITHOUT_HYDROGEN')\n dihCodeList = dihCodeHList + dihCodeNonHList\n properDih = []\n improperDih = []\n condProperDih = [] # list of dihedrals condensed by the same quartet\n #atomPairs = []\n atomPairs = set()\n for i in xrange(0, len(dihCodeList), 5):\n idAtom1 = dihCodeList[i] / 3 # remember python starts with id 0\n idAtom2 = dihCodeList[i+1] / 3\n # 3 and 4 indexes can be negative: if id3 < 0, end group interations\n # in amber are to be ignored; if id4 < 0, dihedral is improper\n idAtom3raw = dihCodeList[i+2] / 3 # can be negative -> exclude from 1-4vdw\n idAtom4raw = dihCodeList[i+3] / 3 # can be negative -> Improper\n idAtom3 = abs(idAtom3raw)\n idAtom4 = abs(idAtom4raw)\n dihTypeId = dihCodeList[i+4] - 1\n atom1 = self.atoms[idAtom1]\n atom2 = self.atoms[idAtom2]\n atom3 = self.atoms[idAtom3]\n atom4 = self.atoms[idAtom4]\n kPhi = uniqKpList[dihTypeId] # already divided by IDIVF\n period = int(uniqPeriodList[dihTypeId]) # integer\n phase = uniqPhaseList[dihTypeId]# angle given in rad in prmtop\n atoms = [atom1, atom2, atom3, atom4]\n dihedral = Dihedral(atoms, kPhi, period, phase)\n if idAtom4raw > 0:\n try: atomsPrev = properDih[-1].atoms\n except: atomsPrev = []\n properDih.append(dihedral)\n if idAtom3raw < 0 and atomsPrev == atoms:\n condProperDih[-1].append(dihedral)\n else:\n condProperDih.append([dihedral])\n pair = (atom1, atom4)\n #if atomPairs.count(pair) == 0 and idAtom3raw > 0:\n if idAtom3raw > 0:\n atomPairs.add(pair)\n else:\n improperDih.append(dihedral)\n try: atomPairs = sorted(atomPairs)\n except: pass\n self.properDihedrals = properDih\n self.improperDihedrals = improperDih\n self.condensedProperDihedrals = condProperDih # [[],[],...]\n self.atomPairs = atomPairs # set((atom1, atom2), ...)\n self.printDebug(\"getDihedrals done\")",
"def myDihedralFunctionAirliner(Epsilon):\n BaseDihedral = 7\n\n # A simple model of a loaded wing shape:\n return BaseDihedral + Epsilon*Epsilon*10",
"def addDihedralBond(a1, a2, length, angleInfo, dihedInfo):\n\n\tif a1.molecule == a2.molecule:\n\t\traise ValueError(\"Atoms to be bonded must be in different models\")\n\n\t# first, get the distance correct\n\tfrom chimera import Xform, cross, angle, Point\n\tdvector = a1.xformCoord() - a2.xformCoord()\n\tdvector.length = dvector.length + length\n\topenState = a2.molecule.openState\n\topenState.globalXform(Xform.translation(dvector))\n\n\t# then angle\n\tif angleInfo:\n\t\tatoms, angleVal = angleInfo\n\t\tp1, p2, p3 = [a.xformCoord() for a in atoms]\n\t\taxis = cross(p1-p2, p2-p3)\n\t\tcurAngle = angle(p1, p2, p3)\n\t\tdelta = angleVal - curAngle\n\t\tv2 = p2 - Point(0.0, 0.0, 0.0)\n\t\ttrans1 = Xform.translation(v2)\n\t\tv2.negate()\n\t\ttrans2 = Xform.translation(v2)\n\t\ttrans1.multiply(Xform.rotation(axis, delta))\n\t\ttrans1.multiply(trans2)\n\t\topenState.globalXform(trans1)",
"def fk4(joint_rotations):\n h0_4 = htm4(joint_rotations)\n x0_4 = h0_4[0, 3]\n y0_4 = h0_4[1, 3]\n z0_4 = h0_4[2, 3]\n d0_4 = [x0_4, y0_4, z0_4]\n return d0_4",
"def generate_dihedral_matrices(protein):\n\n #double check maths for this to be safe (particularly signs)\n\n natoms = len(protein.atoms)\n ndihedrals = len(protein.dihedrals)\n\n A = np.zeros([ndihedrals, 3*natoms])\n force_constants = np.zeros(ndihedrals)\n for dihedral in protein.dihedrals:\n \n atom1_id = dihedral.atom1.id\n atom2_id = dihedral.atom2.id\n atom3_id = dihedral.atom3.id\n atom4_id = dihedral.atom4.id\n\n atom1_xyz = dihedral.atom1.xyz\n atom2_xyz = dihedral.atom2.xyz\n atom3_xyz = dihedral.atom3.xyz\n atom4_xyz = dihedral.atom4.xyz\n\n four_centre_length = np.linalg.norm(atom1_xyz - atom4_xyz)\n\n row = A[dihedral.id]\n row[[3*atom1_id, (3*atom1_id)+1, (3*atom1_id)+2]] = -((atom1_xyz - atom3_xyz) + (atom4_xyz - atom2_xyz))/four_centre_length \n row[[3*atom2_id, (3*atom2_id)+1, (3*atom2_id)+2]] = -((atom2_xyz - atom1_xyz) + (atom2_xyz - atom3_xyz) + (atom2_xyz - atom4_xyz))/four_centre_length\n row[[3*atom3_id, (3*atom3_id)+1, (3*atom3_id)+2]] = -((atom3_xyz - atom4_xyz) + (atom3_xyz - atom1_xyz) + (atom3_xyz - atom2_xyz))/four_centre_length\n row[[3*atom4_id, (3*atom4_id)+1, (3*atom4_id)+2]] = -((atom4_xyz - atom2_xyz) + (atom1_xyz - atom3_xyz))/four_centre_length\n\n force_constant = dihedral.force_constant\n force_constants[dihedral.id] = force_constant\n\n A = scipy.sparse.csr_matrix(A)\n G = scipy.sparse.diags(force_constants)\n\n return (A, G)",
"def calcul_angle(point1, point2, point3):\n \n x1,y1,z1=point1\n x2,y2,z2=point2\n x3,y3,z3=point3\n \n vec1=[x1-x2, y1-y2, z1-z2]\n vec2=[x3-x2, y3-y2, z3-z2]\n\n return calcul_angle_vector(vec1, vec2)",
"def _angle(*vectors):\n if len(vectors) == 1:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[0][1], vectors[0][0]))\n elif len(vectors) == 2:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[1][1], vectors[1][0]) - np.arctan2(vectors[0][1], vectors[0][0]))\n else:\n raise AttributeError()",
"def derivcd4(vals, dx):\n deriv = []\n for i in range(2):\n deriv.append((-3*vals[i] + 4*vals[i+1] - vals[i+2]) / (2*dx))\n for i in range(2, len(vals) - 2):\n deriv.append((-1*vals[i-2] + 8*vals[i-1] + 8*vals[i+1] -\\\n vals[i+2]) / (12*dx))\n # Note that due to the fact that this function has been set up this\n # way, this will not output a value at 5000000\n if i % 500000 == 0:\n print('Derivative list: {}'.format(i))\n for i in range((len(vals) - 2), len(vals)):\n deriv.append((vals[i] - vals[i-1]) / dx)\n return deriv",
"def test_vectors_angle(self):\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")",
"def is_dihedral(self):\n if self._is_dihedral is not None:\n return self._is_dihedral\n\n order = self.order()\n\n if order % 2 == 1:\n self._is_dihedral = False\n return False\n if order == 2:\n self._is_dihedral = True\n return True\n if order == 4:\n # The dihedral group of order 4 is the Klein 4-group.\n self._is_dihedral = not self.is_cyclic\n return self._is_dihedral\n if self.is_abelian:\n # The only abelian dihedral groups are the ones of orders 2 and 4.\n self._is_dihedral = False\n return False\n\n # Now we know the group is of even order >= 6, and nonabelian.\n n = order // 2\n\n # Handle special cases where there are exactly two generators.\n gens = self.generators\n if len(gens) == 2:\n x, y = gens\n a, b = x.order(), y.order()\n # Make a >= b\n if a < b:\n x, y, a, b = y, x, b, a\n # Using Theorem 2.1 of [Di3]:\n if a == 2 == b:\n self._is_dihedral = True\n return True\n # Using Theorem 1.1 of [Di3]:\n if a == n and b == 2 and y*x*y == ~x:\n self._is_dihedral = True\n return True\n\n # Proceed with algorithm of [Di1]\n # Find elements of orders 2 and n\n order_2, order_n = [], []\n for p in self.elements:\n k = p.order()\n if k == 2:\n order_2.append(p)\n elif k == n:\n order_n.append(p)\n\n if len(order_2) != n + 1 - (n % 2):\n self._is_dihedral = False\n return False\n\n if not order_n:\n self._is_dihedral = False\n return False\n\n x = order_n[0]\n # Want an element y of order 2 that is not a power of x\n # (i.e. that is not the 180-deg rotation, when n is even).\n y = order_2[0]\n if n % 2 == 0 and y == x**(n//2):\n y = order_2[1]\n\n self._is_dihedral = (y*x*y == ~x)\n return self._is_dihedral",
"def get_torsional(a, b, c, d):\n \n # Compute 3 vectors connecting the four points\n ba = b - a\n cb = c - b\n dc = d - c\n \n # Compute the normal vector to each plane\n u_A = cross(ba, cb)\n u_B = cross(cb, dc)\n\n #Measure the angle between the two normal vectors\n u_A_mod = mod(u_A)\n u_B_mod = mod(u_B)\n val = dot(u_A, u_B) / (u_A_mod * u_B_mod)\n # better fix?\n if val > 1:\n val = 1\n elif val < -1:\n val = -1\n tor_rad = np.arccos(val)\n \n # compute the sign\n sign = dot(u_A, dc)\n if sign > 0:\n return tor_rad\n else:\n return -tor_rad",
"def set_dihedral(self, pivots, scan, deg_increment):\n if deg_increment == 0:\n logger.warning('set_dihedral was called with zero increment for {label} with pivots {pivots}'.format(\n label=self.label, pivots=pivots))\n for rotor in self.rotors_dict.values(): # penalize this rotor to avoid inf. looping\n if rotor['pivots'] == pivots:\n rotor['times_dihedral_set'] += 1\n break\n else:\n for rotor in self.rotors_dict.values():\n if rotor['pivots'] == pivots and rotor['times_dihedral_set'] <= 10:\n rotor['times_dihedral_set'] += 1\n break\n else:\n logger.info('\\n\\n')\n for i, rotor in self.rotors_dict.items():\n logger.error('Rotor {i} with pivots {pivots} was set {times} times'.format(\n i=i, pivots=rotor['pivots'], times=rotor['times_dihedral_set']))\n raise RotorError('Rotors were set beyond the maximal number of times without converging')\n coordinates, atoms, _, _, _ = get_xyz_matrix(self.final_xyz)\n mol = molecules_from_xyz(self.final_xyz, multiplicity=self.multiplicity, charge=self.charge)[1]\n conf, rd_mol, indx_map = rdkit_conf_from_mol(mol, coordinates)\n rd_scan = [indx_map[i - 1] for i in scan] # convert the atom indices in `scan` to RDKit indices\n new_xyz = set_rdkit_dihedrals(conf, rd_mol, indx_map, rd_scan, deg_increment=deg_increment)\n self.initial_xyz = get_xyz_string(coords=new_xyz, symbols=atoms)",
"def direction(point0, point1):\n d = [0, 0, 0]\n vector = [point1[0] - point0[0], point1[1] - point0[1]]\n d[1] = math.atan2(vector[1], vector[0])\n while d[1] <= -np.pi / 2:\n d[1] += np.pi\n return d",
"def vec_angle_rad(v1,v2):\r\n \r\n c = np.dot(v1,v2)/(vector_len(v2)* vector_len(v2))\r\n return math.acos(c)",
"def angle(z):",
"def Angles(self, degrees=True):\n\n self.__do_essential_memebers_exist__()\n if self.InferElementalDimension() != 2:\n raise ValueError(\"Angles can be computed only for 2D elements\")\n if self.InferSpatialDimension() != 2:\n raise ValueError(\"Angles can be computed only in 2-dimensional plane\")\n\n nodeperelem = self.InferNumberOfNodesPerLinearElement()\n angles = np.zeros((self.nelem, nodeperelem))\n\n norm = lambda x: np.linalg.norm(x,axis=1)\n\n edge_coords = self.points[self.elements[:,:],:]\n if self.element_type == \"tri\":\n AB = edge_coords[:,1,:] - edge_coords[:,0,:]\n AC = edge_coords[:,2,:] - edge_coords[:,0,:]\n BC = edge_coords[:,2,:] - edge_coords[:,1,:]\n\n angles[:,0] = np.einsum(\"ij,ij->i\",AB,AC) / (norm(AB)*norm(AC))\n angles[:,1] = np.einsum(\"ij,ij->i\",AC,BC) / (norm(AC)*norm(BC))\n angles[:,2] = np.einsum(\"ij,ij->i\",BC,-AB)/ (norm(BC)*norm(AB))\n angles = np.arccos(angles)\n\n elif self.element_type == \"quad\":\n AB = edge_coords[:,1,:] - edge_coords[:,0,:]\n BC = edge_coords[:,2,:] - edge_coords[:,1,:]\n CD = edge_coords[:,3,:] - edge_coords[:,2,:]\n DA = edge_coords[:,0,:] - edge_coords[:,3,:]\n\n angles[:,0] = np.einsum(\"ij,ij->i\",AB,BC) / (norm(AB)*norm(BC))\n angles[:,1] = np.einsum(\"ij,ij->i\",BC,CD) / (norm(BC)*norm(CD))\n angles[:,2] = np.einsum(\"ij,ij->i\",CD,DA) / (norm(CD)*norm(DA))\n angles[:,3] = np.einsum(\"ij,ij->i\",DA,-AB)/ (norm(DA)*norm(AB))\n angles = np.arccos(angles)\n\n if degrees:\n angles *= 180/np.pi\n\n return angles",
"def ex_4pdeer(param): \r\n param = _parsargs(param,npar=1) \r\n \r\n # Dipolar pathways\r\n lam = param[0]\r\n pathways = [\r\n [1-lam],\r\n [lam, 0]\r\n ]\r\n return pathways",
"def vec_angle_deg(v1,v2):\r\n \r\n return math.degrees(vec_angle_rad(v1,v2))",
"def angle(v,w):\n cosx = dot_product(v,w) / (length(v) * length(w))\n #det = determinant(A,B)\n rad = math.acos(cosx) # in radians\n return rad\n #return rad*180/math.pi # returns degrees"
] | [
"0.8227105",
"0.7010041",
"0.6948374",
"0.66514385",
"0.6404093",
"0.6321645",
"0.6247588",
"0.6223799",
"0.6211882",
"0.61957514",
"0.61255556",
"0.60270005",
"0.598636",
"0.5953265",
"0.5933423",
"0.59146756",
"0.5819998",
"0.58028436",
"0.57949847",
"0.5760357",
"0.574049",
"0.5727581",
"0.57136375",
"0.5637539",
"0.562745",
"0.5624766",
"0.5620993",
"0.5596551",
"0.5582286",
"0.555544"
] | 0.8613842 | 0 |
Return value of array index i. | def __getitem__(self, i):
return self._ar[i] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getitem__(self, i):\n\t\tif i < self.n:\n\t\t\treturn self.v[i]",
"def __getitem__(self, i):\n return self.get(i, i + 1)",
"def __getitem__(self, i):\n return self.data[i]",
"def __getitem__(self, i):\n return self.__x[i]",
"def xval(self, i):\n return self.x[i]",
"def __getitem__(self, index):\n return self.array[index]",
"def __getitem__(self, i):\n if not (isinstance(i, int), 0 < i < 10):\n raise IndexError('Index should be 0 < int < 10, not %s' % i)\n return self.__values[i]",
"def __getitem__(self, n):\n return self._array[n]",
"def get(self, i):\n\t\tif(i < 0 or i >= self.d):\n\t\t\traise ValueError(\"Illegal index\")\n\t\tif(self.st.contains(i)):\n\t\t\treturn self.st.get(i)\n\t\telse:\n\t\t\treturn 0.0",
"def __getitem__(self,i):\n\t\treturn self.series[i]",
"def get_item(array, index):\n row, column = index\n return array[row][column]",
"def get(self, i: int) -> int:\n return self.range_sum(i, i)",
"def __getitem__(self,i):\n return self._items[i]",
"def __getitem__(self, inds):\n i, j = inds\n return self.array[i][j]",
"def __getitem__(self, idx):\n return self.GetArray(idx)",
"def __getitem__(self, idx):\n return self.GetArray(idx)",
"def field(self,i):\n assert self.is_block()\n assert self.tag () != OCamlValue.DOUBLE_ARRAY_TAG # FIXME not implemented\n n = self.size_words()\n if n is None:\n return None\n if i < 0 or i >= n:\n raise IndexError(\"field %d size %d\" % (i,n))\n return self._unsafe_field(i)\n #t = intnat.array(n).pointer()\n #return OCamlValue(self.v.cast(t).dereference()[i])",
"def get(self, index):\n if 0 <= index <= len(self.nums):\n return self.nums[index]\n return -1",
"def __getitem__(self, i):\n return self.__points[i]",
"def __getitem__(self, index):\n return self._value_at(index)",
"def __getitem__(self, index):\n return self._value_at(index)",
"def item(self, i, path=None):\n if path is None:\n assert list(self.sequences.keys())[0] is NoDim, \"Cannot access item without path if the array has more than one dimension.\"\n path = NoDim\n return self.sequences[path][i]",
"def __getitem__(self, index):\n return self._nums[index]",
"def __getitem__(self, index):\n return self.values[index]",
"def __getitem__(self, index):\n assert 0 <= index < len(self), \"Array subscript out of range\"\n return self._elements[index]",
"def get_value_at(self, i):\n return self.default_value if i > self.last_item else self.heap[i]",
"def __getitem__(self, i):\n try:\n return self.__getIthNode(i).getPayload()\n except IndexError:\n print(\"ERROR: Index value out of range.\")",
"def __getitem__(self, i):\n # UHI support\n if callable(i):\n i = i(self)\n else:\n if i < 0:\n i += self._ax.size\n if i >= self._ax.size:\n raise IndexError(\n \"Out of range access, {0} is more than {1}\".format(i, self._ax.size)\n )\n return self.bin(i)",
"def __getitem__(self, i):\n if isinstance(i, slice):\n return self.v[slice(i.start+self.tau-1, i.stop+self.tau-1, i.step)]\n else:\n return self.v[i+self.tau-1]",
"def __getitem__(self, index):\r\n #if index < 0 or index >= self.size():\r\n # raise IndexError(\"Array index out of bounds\")\r\n return self._items[index]"
] | [
"0.7630643",
"0.7395586",
"0.73718834",
"0.7324915",
"0.727493",
"0.7198595",
"0.71864104",
"0.6988594",
"0.6973699",
"0.69633764",
"0.6930768",
"0.6863983",
"0.6862375",
"0.68333304",
"0.67888004",
"0.67888004",
"0.6754036",
"0.67357033",
"0.6730818",
"0.6709809",
"0.6709809",
"0.66407263",
"0.66386974",
"0.6629713",
"0.6595245",
"0.6549933",
"0.6495784",
"0.64626557",
"0.6460316",
"0.6431295"
] | 0.7823676 | 0 |
Assign values to array index i. | def __setitem__(self, i, value):
self._ar[i] = value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __setitem__(self, i, val):\n\t\tif i < self.n:\n\t\t\tself.v[i] = val",
"def assignMoreVectors(self, i):\n return",
"def __setitem__(self, inds, value):\n i, j = inds\n self.array[i][j] = value",
"def __setitem__(self,i,v):\n _items[i] = v",
"def i(self, i):\n\n self._i = i",
"def setitem(self, i, j, value):\n # XXX: flint matrices do not support negative indices\n # XXX: They also raise ValueError instead of IndexError\n m, n = self.shape\n if i < 0:\n i += m\n if j < 0:\n j += n\n try:\n self.rep[i, j] = value\n except ValueError:\n raise IndexError(f\"Invalid indices ({i}, {j}) for Matrix of shape {self.shape}\")",
"def updateValue(self,i,x):\n assert 0 <= i < len(self)\n self.__update_aux(0,0,len(self),i,x)",
"def __setitem__(self, i: int, item: Any) -> None:\n if i < 0:\n i = self._length + i\n\n curr = self._first\n index_so_far = 0\n\n while curr is not None:\n if index_so_far == i:\n curr.item = item\n break\n index_so_far += 1\n curr = curr.next\n if curr is None:\n raise IndexError",
"def __setitem__(self, i, v):\n raise TypeError(\"'Factorization' object does not support item assignment\")",
"def __setitem__(self, i, value):\n if not (isinstance(i, int), 0 < i < 10):\n raise IndexError('Index should be 0 < int < 10, not %s' % i)\n if value not in [self.CELL_EMPTY, self.CELL_0, self.CELL_1]:\n raise ValueError('Invalid cell value')\n self.__values[i] = value",
"def set(self, i: int, v: int) -> None:\n self.add(i, v - self.range_sum(i, i))",
"def __setitem__(self, i_j, value):\n\t\t\n\t\tif i_j == Ellipsis:\n\t\t\tself.item_cache.clear()\n\t\t\tassert not self.item_cache\n\t\telse:\n\t\t\ttry:\n\t\t\t\tif any((hasattr(ij, 'start') and hasattr(ij, 'stop') and hasattr(ij, 'step')) for ij in i_j):\n\t\t\t\t\tself.item_cache.clear()\n\t\t\t\t\tassert not self.item_cache\n\t\t\texcept TypeError:\n\t\t\t\ttry:\n\t\t\t\t\tdel self.item_cache[i_j]\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpass\n\t\t\n\t\tdef setitem(direction, indices_i, indices_j):\n\t\t\tif direction == self.__direction.scalar:\n\t\t\t\tself.value[self.row_dimension * indices_i + indices_j] = value\n\t\t\telif direction == self.__direction.row:\n\t\t\t\tif len(value) != len(indices_i):\n\t\t\t\t\traise ValueError(\"Assigned value (len {}) must have length equal to indices list ({}).\".format(len(value), len(indices_i)))\n\t\t\t\tj = indices_j\n\t\t\t\tfor m, i in enumerate(indices_i):\n\t\t\t\t\tself.value[self.row_dimension * i + j] = value[m]\n\t\t\telif direction == self.__direction.column:\n\t\t\t\tif len(value) != len(indices_j):\n\t\t\t\t\traise ValueError(\"Assigned value (len {}) must have length equal to indices list ({}).\".format(len(value), len(indices_j)))\n\t\t\t\ti = indices_i\n\t\t\t\tfor n, j in enumerate(indices_j):\n\t\t\t\t\tself.value[self.row_dimension * i + j] = value[n]\n\t\t\telif direction == self.__direction.matrix:\n\t\t\t\tif self.row_dimension != len(indices_i):\n\t\t\t\t\traise ValueError\n\t\t\t\tif self.column_dimension != len(indices_j):\n\t\t\t\t\traise ValueError\n\t\t\t\tfor (m, i), (n, j) in product(enumerate(indices_i), enumerate(indices_j)):\n\t\t\t\t\tself.value[self.row_dimension * i + j] = value[m, n]\n\t\t\telif direction == self.__direction.copy:\n\t\t\t\tif self.algebra != value.algebra:\n\t\t\t\t\traise TypeError(\"In-place matrix assignment works only from a matrix of the same type.\")\n\t\t\t\tif self.column_dimension != value.column_dimension or self.row_dimension != value.row_dimension:\n\t\t\t\t\traise ValueError(\"In-place matrix assignment works only from a matrix of the same dimensions.\")\n\t\t\t\tself.value = list(value.value)\n\t\t\telse:\n\t\t\t\traise RuntimeError(\"Unknown direction value: `{}`\".format(repr(direction)))\n\t\t\n\t\tself.__analyze_indices(i_j, setitem)",
"def set_values(self, value):\n for i in range(len(self)):\n self._elements[i] = value",
"def set_value_at(self, i, new_value=default_value):\n self.heap[i] = new_value",
"def set_values(self,x):\n for i in range(len(self)):\n self[i].set_value(x[i])",
"def x_at_y(arr,n, i):\n arr.append(0)\n for z in range(len(arr) - 1, i, -1):\n myarr[z] = myarr[z - 1]\n myarr[i] = n\n return myarr",
"def __setitem__(self, i, value):\n try:\n self.__getIthNode(i).setPayload(value)\n except IndexError:\n print(\"ERROR: Index value out of range.\")",
"def set_idx(self, i, other, tensor_value):\n for k, v in self.variables.items():\n if k not in other.variables:\n self.variables[k][i] *= 0\n\n for k, v in other.variables.items():\n if k not in self.variables:\n self.variables[k] = np.zeros(tensor_value.shape)\n self.variables[k][i] = other.variables[k]",
"def update(i, v, xs):\n return [v if i == ind else x for ind, x in enumerate(xs)]",
"def __setitem__(self, i, value):\n if i < X:\n raise IndexError(\"point3d::__setitem__: negative index {0}\".format(i))\n if i == X:\n self._x = value\n return\n if i == Y:\n self._y = value\n return\n if i == Z:\n self._z = value\n return\n # beyond Z\n raise IndexError(\"point3d::__setitem__: index too large {0}\".format(i))",
"def __setitem__(self, index, value):\n assert 0 <= index < len(self), \"Array subscript out of range\"\n self._elements[index] = value",
"def setItem(self, i, j, val):\n if i < 0:\n raise IndexError('Row index must be nonnegative.')\n if j < 0:\n raise IndexError('Column index must be nonnegative.')\n\n self.__m[i - 1][j - 1] = val",
"def put(self, i, value):\n\t\tif(i < 0 or i >= self.d):\n\t\t\traise ValueError(\"Illegal index\")\n\t\tif(value == 0.0):\n\t\t\tself.st.delete(i)\n\t\telse:\n\t\t\tself.st.put(i,value)",
"def set(self, i, buf):\n self.buf = buf\n self.buf_i = i\n self.avg = None if len(buf) == 0 else (sum(buf) / len(buf))",
"def __setitem__(self, j, val):\n\t\tself._coords[j] = val",
"def __setitem__(self, index, value):\n self.elem[index] = value",
"def assign_index(self):\n\n i = 0\n for word in self.words:\n self.index[word] = i\n i += 1",
"def assign(array1, array2):\n for i in range(len(array1)):\n array2[i] = array1[i]",
"def __setitem__(self, index_tuple, value):\n assert len(index_tuple) == 2, \"Invalid number of array subscripts.\"\n row, col = index_tuple\n assert 0 <= row < self.num_rows() and 0 <= col < self.num_cols(), \\\n \"Array subscript out of range.\"\n array_1d = self.rows[row]\n array_1d[col] = value",
"def __setitem__(self, idx, val):\n self.rows[idx[0]][idx[1]] = val"
] | [
"0.73695976",
"0.68885285",
"0.686191",
"0.67810804",
"0.67465854",
"0.6717178",
"0.6495393",
"0.6471185",
"0.6410022",
"0.638259",
"0.6255025",
"0.62211835",
"0.61497325",
"0.6143243",
"0.6138627",
"0.6102501",
"0.60960704",
"0.60622686",
"0.60607976",
"0.60226876",
"0.6016797",
"0.5963274",
"0.59539586",
"0.59260744",
"0.5903171",
"0.58917004",
"0.57539505",
"0.57359785",
"0.5720493",
"0.56875914"
] | 0.7562142 | 0 |
Validate if i is in array. | def __contains__(self, i):
return i in self._ar | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkElementInArray(element,array):\n\t\n\texists = False\n\t\n\tfor i in array:\n\t\n\t\tif i == element:\n\t\t\texists = True\n\n\treturn exists",
"def arrayContains(arr, item):\n\tcontains = True\n\ttry:\n\t\tarr.index(item)\n\texcept ValueError:\n\t\tcontains = False\n\treturn contains",
"def in_array(array1, array2):",
"def in_array(val, obj):\n return (val in obj)",
"def is_valid(array, index):\n row, column = index\n return 0 <= row < len(array) and 0 <= column < len(array[row])",
"def __contains__(self, i):\n for j in self:\n if j == i:\n return True\n return False",
"def check_array(self, v, t):\n raise NotImplementedError('check_array')",
"def contains(self, i):\n return self.__qp[i] != -1",
"def contains(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n if not self.arr[val]:\n return False\n else:\n return True",
"def is_in(self, e):\n return e in self.vals",
"def __contains__(self, idx):\n return idx in self._data",
"def is_valid_array(n):\n\tarr = []\n\twhile len(arr) != n:\n\t\ttry:\n\t\t\tarr = list(map(int, input(\"Enter integer values: \").split()))\n\t\texcept:\n\t\t\tprint(\"Invalid input\")\n\t\tif len(arr) != n:\n\t\t\tprint(\"Must have {} integers\".format(n))\n\treturn arr",
"def check(indivs, geno_list):\r\n\tfor i in xrange(0,len(indivs)):\r\n\t\tif indivs[i] not in geno_list:\r\n\t\t\t# print \"this is not in: \"+ indivs[i]\r\n\t\t\treturn False\r\n\treturn True",
"def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)",
"def row_is_in_array(row, array):\n return any((array[:] == row).all(1))",
"def inside_exons(i, exons):\n for exon in exons:\n if i in range1(exon[0], exon[1]):\n return(True)\n return(False)",
"def arrNotInArrList(arr, arrList):\n a = np.array(arr)\n for item in arrList:\n item = np.array(item)\n if np.array_equiv(item, a):\n return False\n return True",
"def is_inside_np(self, NPs, idx):\n for np in NPs:\n if np[0] <= idx and np[1] > idx:\n return True\n return False",
"def is_in(elt, seq):\n\treturn any(x is elt for x in seq)",
"def __contains__(self, v):\n for i in self:\n if v in i:\n return True\n False",
"def is_in(elt, seq):\n return any(x is elt for x in seq)",
"def check_i(self):\n for i in xrange(1, len(self.I)):\n assert self.I[i]-self.I[i-1] >= self.m",
"def _idxs_are_present(self, *args):\n return set(args).issubset(set(range(self.n_atoms)))",
"def is_arrayexpress_array(val):\n return arrayexpress_array_regexp.match(val)",
"def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False",
"def check_index(i):\n\n i = asarray(i)\n if (i.ndim > 1) or (size(i) < 1):\n raise Exception(\"Index must be one-dimensional and non-singleton\")\n\n return i",
"def _match_array(tipo, array):\n\n return bool(re.match(array, tipo))",
"def exist(self,list,a):\r\n\t\ti = 0\r\n\t\tfor elem in list:\r\n\t\t\tif (elem == a):\r\n\t\t\t\ti=i+1\r\n\t\tif (i>0):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False",
"def validateTrain(self, train):\n i = 0\n while i < len(train):\n c = train[i]\n\n while train[i] == c:\n i = i + 1\n if i >= len(train):\n return True\n\n if c in train[i + 1:]:\n return False\n\n return True",
"def __contains__(self, elem):\n return elem in list(self)"
] | [
"0.6500306",
"0.64386773",
"0.62485456",
"0.62156224",
"0.6165227",
"0.6158595",
"0.60294133",
"0.58980155",
"0.5880824",
"0.58586836",
"0.58278084",
"0.5720893",
"0.5711022",
"0.5681861",
"0.566563",
"0.5652514",
"0.5647837",
"0.5602884",
"0.5552213",
"0.55297995",
"0.5498522",
"0.5497623",
"0.54937136",
"0.5460412",
"0.54585606",
"0.54583603",
"0.5447774",
"0.5442354",
"0.54013115",
"0.53961"
] | 0.70314944 | 0 |
Return a deep copy of the Vector. | def copy(self):
return Vector(self._ar) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clone(self):\n v = self.mV[:]\n return Vector.fromSequence(v)",
"def copy(self):\n return Vector(self.x, self.y)",
"def Copy(self) -> BaseVector:",
"def deepcopy(self):\n return self.copy()",
"def deepcopy(self):\n return copy.deepcopy(self)",
"def copy(self):\n\t\treturn pythoncopy.deepcopy(self)",
"def deepcopy(self):\n return copymod.deepcopy(self)",
"def copy(self):\r\n return copy.deepcopy(self)",
"def copy(self):\n vList = GeneralVertexList(len(self.V))\n vList.setVertices(list(self.V.values()))\n return vList",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def copy(self):\n return copy.deepcopy(self)",
"def clone(self):\n return deepcopy(self)",
"def clone(self):\n return deepcopy(self)",
"def clone(self):\n return deepcopy(self)",
"def clone(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)"
] | [
"0.83562434",
"0.8350245",
"0.8067865",
"0.7411623",
"0.7331414",
"0.73207086",
"0.7280997",
"0.72413135",
"0.72351533",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7163208",
"0.7142696",
"0.7142696",
"0.7142696",
"0.7142696",
"0.7123576"
] | 0.8383834 | 0 |
Compute spherical coordinates (r, azimuth, polar_angle) for X,Y,Z point. | def get_spherical_coordinates(xyz: numpy.array) -> Tuple[float, float, float]:
r = numpy.linalg.norm(xyz)
if 0 == r:
return (0, 0, 0)
azimuth = _get_azimuth(xyz[0], xyz[1])
polar_angle = numpy.arccos(xyz[2] / r)
return (r, azimuth, polar_angle) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cartesianToSpherical(x=0, y=0, z=0):\n\n hxy = np.hypot(x, y)\n radius = np.hypot(hxy, z)\n altitude = np.arctan2(z, hxy)\n azimuth = np.arctan2(y, x)\n return altitude, azimuth, radius",
"def cartesian_to_spherical(x, y, z):\n import math\n\n xsq = x ** 2\n ysq = y ** 2\n zsq = z ** 2\n\n r = (xsq + ysq + zsq) ** 0.5\n s = (xsq + ysq) ** 0.5\n\n if np.isscalar(x) and np.isscalar(y) and np.isscalar(z):\n lon = math.atan2(y, x)\n lat = math.atan2(z, s)\n else:\n lon = np.arctan2(y, x)\n lat = np.arctan2(z, s)\n\n return r, lat, lon",
"def cart2spheric(x, y, z):\n # doesn't compute r because chosen egal to 1\n with np.errstate(all='ignore'):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n\n return theta, phi",
"def __cartesian2spherical(x: float, y: float, z: float) -> Tuple[float, float]:\n if x == 0 and y == 0:\n return 0, np.degrees(np.pi * 0.5 * np.sign(z))\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n return np.degrees(lon), np.degrees(lat)",
"def cartesian2spherical(vector: tuple[float, float, float]) -> tuple[float, float, float]:\n x, y, z = vector\n r = m.sqrt(x**2 + y**2 + z**2)\n # acos returns the angle in radians between 0 and pi\n theta = m.degrees(m.acos(z / r))\n # atan2 returns the angle in radians between -pi and pi\n phi = m.degrees(m.atan2(y, x))\n # lets ensure the angle in degrees is always between 0 and 360, as SHIELD-HIT12A requires\n if phi < 0.:\n phi += 360.\n return theta, phi, r",
"def sphericalToCartesian(altitude=0, azimuth=0, radius=0):\n\n rcos_theta = radius * np.cos(altitude)\n x = rcos_theta * np.cos(azimuth)\n y = rcos_theta * np.sin(azimuth)\n z = radius * np.sin(altitude)\n return x, y, z",
"def spherical_function(j, x, y, z):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n return angular_function(j, theta, phi)",
"def sphericalToCartesian(magnitude, azimuthal, polar):\r\n azimuthal = azimuthal*math.pi/180.0\r\n polar = polar*math.pi/180.0\r\n xval = magnitude * math.sin(azimuthal) * math.cos(polar)\r\n yval = magnitude * math.sin(azimuthal) * math.sin(polar)\r\n zval = magnitude * math.cos(azimuthal)\r\n return [xval, yval, zval]",
"def spherical_project(x, y, cos_lat, sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n right_angle = np.pi / 2\n\n d_lon = x - celestial_pole_x\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n phi = native_pole_x + d_lon + np.pi\n theta = y\n else:\n phi = native_pole_x - d_lon\n theta = -y\n else:\n cos_d_lon = np.cos(d_lon)\n\n phi = native_pole_x + np.arctan2(\n -cos_lat * np.sin(d_lon),\n (sin_lat * celestial_cos_lat)\n - (cos_lat * celestial_sin_lat * cos_d_lon))\n\n theta = asin(\n (sin_lat * celestial_sin_lat)\n + (cos_lat * celestial_cos_lat * cos_d_lon))\n\n phi = np.fmod(phi, two_pi)\n\n return theta, phi",
"def spherical_to_cartesian(r, lat, lon):\n import math\n\n if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):\n x = r * math.cos(lat) * math.cos(lon)\n y = r * math.cos(lat) * math.sin(lon)\n z = r * math.sin(lat)\n else:\n x = r * np.cos(lat) * np.cos(lon)\n y = r * np.cos(lat) * np.sin(lon)\n z = r * np.sin(lat)\n\n return x, y, z",
"def cart2spher(x: np.ndarray, y: np.ndarray,\n z: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n indexes = np.where((x == 0) & (y == 0))[0]\n if indexes.size:\n x[indexes] = np.nan\n y[indexes] = np.nan\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n if indexes.size:\n lon[indexes] = 0\n lat[indexes] = np.pi * 0.5 * np.sign(z[indexes])\n return np.degrees(lon), np.degrees(lat)",
"def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list",
"def sphere_centers(r_x, r_y, r_z):\n a_ccs_p_trans_m = hom_translation_matrix(\n t_x=0.265, t_y=0, t_z=0.014)\n a_ccs_p_rot_m = hom_rotation(x_axis_rotation_matrix(r_x) @\n y_axis_rotation_matrix(r_y) @\n z_axis_rotation_matrix(r_z))\n a_p_sph_1_2 = hom_translation_matrix(\n t_x=0.015, t_y=0.029, t_z=-0.0965)\n a_p_sph_2_2 = hom_translation_matrix(\n t_x=0.015, t_y=-0.029, t_z=-0.0965)\n\n a_ccs_ = a_ccs_p_trans_m @ a_ccs_p_rot_m\n a_c1 = a_ccs_ @ a_p_sph_1_2\n a_c2 = a_ccs_ @ a_p_sph_2_2\n\n return get_translation(a_c1), get_translation(a_c2)",
"def project_to_sphere(points):\n # for uv, the sphere: r=1, azimuth(phi): 2*pi*u, elevation(theta): 2*pi*v\n # theta is elevation, phi is azimuth\n r, theta, phi = cs.cart2sp(x=points[:, 0], y=points[:, 1], z=points[:, 2])\n # logger.info(f\"number of zero points in r: {np.sum(r==0)}\")\n assert np.sum(r == 0) == 0, \"points contains zeros\"\n points_sphere = points / r.reshape(-1, 1)\n return points_sphere, r, theta, phi\n\n # r, theta, phi = cs.cart2sp(x=1, y=1, z=1)\n\n # # spherical to cartesian\n # x, y, z = cs.sp2cart(r=1, theta=np.pi/4, phi=np.pi/4)\n\n # # cartesian to cylindrical\n # r, phi, z = cs.cart2cyl(x=1, y=1, z=1)",
"def CartesianToSpherical(Cartesian):\n\n # x,y,z -> r,theta,phi\n x = Cartesian[:,0]\n y = Cartesian[:,1]\n z = Cartesian[:,2]\n r = np.sqrt(x*x + y*y + z*z)\n projR = np.sqrt(x*x + y*y)\n theta = np.arccos(z/r)\n phi = np.arctan2(y,x)\n theta[theta<0.] +=2.*np.pi\n \n if (len(Cartesian[0,:])==3):\n Spherical = np.column_stack((r,theta,phi))\n return Spherical\n else:\n # vx,vy,vz -> vr,vtheta,vphi\n vx = Cartesian[:,3]\n vy = Cartesian[:,4]\n vz = Cartesian[:,5]\n vr = (x*vx + y*vy + z*vz)/r\n vt = (z*vr - r*vz)/projR\n vp = r*np.sin(theta)*(vy*x-y*vx)/(projR*projR) \n Spherical = np.column_stack((r,theta,phi,vr,vt,vp))\n return Spherical",
"def coord_rotate_rad(x, y, z):\n #-- 1 --\n xt = math.asin ( math.sin(x) * math.sin(y) +\n math.cos(x) * math.cos(y) * math.cos(z) )\n #-- 2 --\n yt = math.acos ( ( math.sin(x) - math.sin(y) * math.sin(xt) ) /\n ( math.cos(y) * math.cos(xt) ) )\n #-- 3 --\n if math.sin(z) > 0.0:\n yt = TWO_PI - yt\n\n #-- 4 --\n return (xt, yt)",
"def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]",
"def SphericalToCartesian(Spherical):\n\n # r,theta,phi -> x,y,z\n r = Spherical[:,0]\n st = np.sin(Spherical[:,1])\n sp = np.sin(Spherical[:,2])\n ct = np.cos(Spherical[:,1])\n cp = np.cos(Spherical[:,2])\n x = r*st*cp\n y = r*st*sp\n z = r*ct\n\n if (len(Spherical[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n return Cartesian\n else:\n # vr,vtheta,vphi -> vx,vy,vz\n vr = Spherical[:,3]\n vt = Spherical[:,4]\n vp = Spherical[:,5]\n vx = vr*st*cp - vt*ct*cp - vp*sp\n vy = vr*st*sp + vt*ct*sp + vp*cp\n vz = vr*ct - vt*st\n Cartesian= np.column_stack((x,y,z,vx,vy,vz))\n return Cartesian",
"def spherical(self, x, y):\n\t\twhile x >= self.planet.width or x < 0 or y >= self.planet.height or y < 0:\n\t\t\t#change x if x is out of boundary\n\t\t\tif x >= self.planet.width:\n\t\t\t\tx -= (self.planet.width)\n\t\t\telif x < 0:\n\t\t\t\tx += (self.planet.width)\n\t\t\t#change y if y is out of boundary\n\t\t\tif y >= self.planet.height:\n\t\t\t\ty -= (self.planet.height)\n\t\t\telif y < 0:\n\t\t\t\ty += (self.planet.height)\n\t\treturn x, y",
"def cart2sph(x: float, y: float, z: float) -> typing.Tuple[float, float, float]:\n hxy = hypot(x, y)\n r = hypot(hxy, z)\n el = atan2(z, hxy)\n az = atan2(y, x)\n return az, el, r",
"def _position_cartesian2spherical(pos):\n\n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(x**2+y**2+z**2) #radius position of each particle\n\n #define theta and take care of r=0 case\n theta=np.zeros(np.size(x))\n ind_zero=(r == 0.) #is there any point where radius is 0 ?\n theta= np.arccos(z/r) \n theta[ind_zero]=0.\n\n phi=np.arctan2(y,x)\n\n return np.dstack((r,theta,phi))[0]",
"def cartesian2spherical(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y, z = cartesian\n distance = np.linalg.norm(cartesian)\n azimuth = np.arccos(z / distance)\n elevation = np.arctan2(y, x) # Use arctan2 instead of arctan to get proper sign!\n return np.array([distance, azimuth, elevation])",
"def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos",
"def polar_coord(point, center):\n x = point[0] - center[0]\n y = point[1] - center[1]\n rho = np.sqrt(x ** 2 + y ** 2)\n phi = np.arctan2(y, x)\n return np.array([phi, rho])",
"def sph2cart(az: float, el: float, r: float) -> typing.Tuple[float, float, float]:\n rcos_theta = r * cos(el)\n x = rcos_theta * cos(az)\n y = rcos_theta * sin(az)\n z = r * sin(el)\n return x, y, z",
"def spherical_to_cartesian(self, r, phi, theta):\n x = r*cos(phi)*sin(theta)\n y = r*sin(phi)*sin(theta)\n z = r*cos(theta)\n \n return Vector(float(x), float(y), float(z))",
"def circle_point(radius, phi):\n if radius <= 0:\n raise AssertionError('Radius mast be grater than 0')\n x = radius * cos(radians(phi))\n y = radius * sin(radians(phi))\n z = 0\n\n return x, y, z",
"def _position_spherical2cylindrical(pos):\n \n\n r=pos[:,0]\n theta_spherical=pos[:,1]\n phi_spherical=pos[:,2]\n\n if any(theta_spherical>np.pi) or any(theta_spherical<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n rho=r*np.sin(theta_spherical)\n theta_cylindrical=phi_spherical\n z=r*np.cos(theta_spherical)\n\n return np.dstack((rho,theta_cylindrical,z))[0]",
"def xyz_to_spherical(self, xyz: np.ndarray, directions: bool = False) -> np.ndarray:\n if not directions:\n xyz = xyz - self.xyz\n r = np.sqrt(np.sum(xyz ** 2, axis=1))\n azimuth_iso = np.arctan2(xyz[:, 1], xyz[:, 0])\n altitude_iso = np.arccos(xyz[:, 2] / r)\n angles = np.column_stack(\n (\n (90 - (azimuth_iso * 180 / np.pi)) % 360,\n 90 - (altitude_iso * 180 / np.pi),\n )\n )\n if not directions:\n angles = np.column_stack((angles, r))\n return angles",
"def spherical2cartesian(spherical):\n spherical = np.array(spherical).squeeze()\n distance, azimuth, elevation = spherical\n x = distance * np.sin(azimuth) * np.cos(elevation)\n y = distance * np.sin(azimuth) * np.sin(elevation)\n z = distance * np.cos(azimuth)\n return np.array([x, y, z])"
] | [
"0.8174543",
"0.74399215",
"0.73569125",
"0.73225564",
"0.70352703",
"0.701186",
"0.6950183",
"0.69103575",
"0.69003415",
"0.68811667",
"0.68668544",
"0.6825777",
"0.68175334",
"0.68068254",
"0.6771065",
"0.6755179",
"0.67518723",
"0.6618703",
"0.65975374",
"0.655547",
"0.6547075",
"0.64991987",
"0.64818317",
"0.64721715",
"0.6468997",
"0.64433473",
"0.6401196",
"0.6398245",
"0.6398176",
"0.6382422"
] | 0.7569556 | 1 |
Create [entries] numpy Z rotation matrices for [entries] angles. | def multi_rot_Z(angle_rads: numpy.ndarray) -> numpy.ndarray:
rz = numpy.empty((angle_rads.shape[0], 4, 4))
rz[...] = numpy.identity(4)
rz[:, 0, 0] = rz[:, 1, 1] = numpy.cos(angle_rads)
rz[:, 1, 0] = numpy.sin(angle_rads)
rz[:, 0, 1] = -rz[:, 1, 0]
return rz | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n [np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()",
"def rotation_matrices_from_angles(angles):\n\n angles = np.atleast_1d(angles)\n npts = len(angles)\n\n sina = np.sin(angles)\n cosa = np.cos(angles)\n\n R = np.zeros((npts, 2, 2))\n R[:, 0, 0] = cosa\n R[:, 1, 1] = cosa\n\n R[:, 0, 1] = -sina\n R[:, 1, 0] = sina\n\n return R",
"def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = angles[0:3]\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat",
"def _calc_rotation_matrix(self, inds=None):\n if inds is None:\n inds = range(self.Ncomponents)\n\n n_inds = len(inds)\n\n lon, lat = self.get_lon_lat()\n # Find mathematical points and vectors for RA/Dec\n theta_frame = np.pi / 2.0 - lat.rad[inds]\n phi_frame = lon.rad[inds]\n frame_vec = sct.r_hat(theta_frame, phi_frame)\n assert frame_vec.shape == (3, n_inds)\n\n # Find mathematical points and vectors for Alt/Az\n theta_altaz = np.pi / 2.0 - self.alt_az[0, inds]\n phi_altaz = self.alt_az[1, inds]\n altaz_vec = sct.r_hat(theta_altaz, phi_altaz)\n assert altaz_vec.shape == (3, n_inds)\n\n R_avg = self._calc_average_rotation_matrix()\n\n R_exact = np.zeros((3, 3, n_inds), dtype=np.float64)\n\n for src_i in range(n_inds):\n intermediate_vec = np.matmul(R_avg, frame_vec[:, src_i])\n\n R_perturb = sct.vecs2rot(r1=intermediate_vec, r2=altaz_vec[:, src_i])\n\n R_exact[:, :, src_i] = np.matmul(R_perturb, R_avg)\n\n return R_exact",
"def create_rotation_matrix_3d(angles) -> np.array:\n\n mat1 = np.array([[1., 0., 0.],\n [0., math.cos(angles[0]), math.sin(angles[0])],\n [0., -math.sin(angles[0]), math.cos(angles[0])]],\n dtype='float')\n\n mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],\n [0., 1., 0.],\n [math.sin(angles[1]), 0., math.cos(angles[1])]],\n dtype='float')\n\n mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],\n [-math.sin(angles[2]), math.cos(angles[2]), 0.],\n [0., 0., 1.]],\n dtype='float')\n\n mat = (mat1 @ mat2) @ mat3\n return mat",
"def make_sample_rot_matrix(self, angles):\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )",
"def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot",
"def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])",
"def rot_z(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n rz = np.array([[cangle, sangle, 0.0],\n [-sangle, cangle, 0.0],\n [0.0, 0.0, 1.0]])\n return rz",
"def z_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[cos_t, -sin_t, 0],\n [sin_t, cos_t, 0],\n [0, 0, 1]])",
"def make_sample_rot_matrix(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def rot_z(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[np.cos(theta_rad), -np.sin(theta_rad), 0],\n [np.sin(theta_rad), np.cos(theta_rad), 0],\n [0, 0, 1]]\n return np.matrix(rotation_matrix)",
"def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)",
"def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)",
"def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])",
"def create_azimuthal_polarization(dim, rotation):\n theta_array = np.zeros((dim, dim))\n\n for i in range(np.size(theta_array, 0)):\n for j in range(np.size(theta_array, 1)):\n x = -dim / 2 + i\n y = -dim / 2 + j\n # perform roation\n th = math.pi*rotation/180.0\n x = np.cos(th)*x - np.sin(th)*y\n y = np.sin(th)*x + np.cos(th)*y\n\n rot = math.atan2(x, y) + math.pi/2\n # factor = (rot % (2*math.pi))\n theta_array[i][j] = (rot % (2 * math.pi))\n return theta_array",
"def rotZ(theta, mode = 'radians'):\n\n\tif mode != 'radians' and mode != 'degrees':\n\t\traise ValueError('Mode should either be ``radians`` or ``degrees``.')\n\tif mode == 'degrees':\n\t\ttheta = np.deg2rad(theta)\n\treturn np.matrix([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], \\\n\t\t[0., 0., 1.]])",
"def rotation_matrix(rx, ry, rz):\n # Convert from degrees to radians.\n rx = np.pi * rx / 180\n ry = np.pi * ry / 180\n rz = np.pi * rz / 180\n\n # Pre-compute sine and cosine of angles.\n cx, cy, cz = np.cos([rx, ry, rz])\n sx, sy, sz = np.sin([rx, ry, rz])\n\n # Set up euler rotations.\n Rx = np.array([[1, 0, 0, 0],\n [0, cx, -sx, 0],\n [0, sx, cx, 0],\n [0, 0, 0, 1]])\n\n Ry = np.array([[cy, 0, sy, 0],\n [0, 1, 0, 0],\n [-sy, 0, cy, 0],\n [0, 0, 0, 1]])\n\n Rz = np.array([[cz, -sz, 0, 0],\n [sz, cz, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n return Rz.dot(Ry.dot(Rx))",
"def vrrotvec2mat(ax_ang):\n\n #file_dir = os.path.dirname(os.path.realpath(__file__))\n #path_dir2 = file_dir + '/../geometry/'\n #sys.path.append(path_dir2)\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n c = np.cos(angle)\n s = np.sin(angle)\n tc = 1 - c\n\n mt11 = tc*x*x + c\n mt12 = tc*x*y - s*z\n mt13 = tc*x*z + s*y\n\n mt21 = tc*x*y + s*z\n mt22 = tc*y*y + c\n mt23 = tc*y*z - s*x\n\n mt31 = tc*x*z - s*y\n mt32 = tc*y*z + s*x\n mt33 = tc*z*z + c\n\n mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n\n inds1 = np.where(ax_ang[4, :] == -1)\n mtx[inds1, :] = -mtx[inds1, :]\n\n if msz == 1:\n mtx = mtx.reshape(3, 3)\n else:\n mtx = mtx.reshape(msz, 3, 3)\n\n return mtx",
"def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])",
"def rotation_elements(self, eta, phi, theta):\n \n # Three-axis rotation:\n # 1. Rotate about +z by eta (follows RHR; rotation is mathematical and thus counter-clockwise)\n # 2. Tilt by phi with respect to +z (rotation about y-axis) then\n # 3. rotate by theta in-place (rotation about z-axis) ### BUG: This isn't a conceptual rotation about z (influenced by other rotations)\n \n\n eta = radians( eta ) # eta is orientation around the z axis (before reorientation)\n phi = radians( phi ) # phi is grain tilt (with respect to +z axis)\n theta = radians( theta ) # grain orientation (around the z axis)\n \n rotation_elements = [[ cos(eta)*cos(phi)*cos(theta)-sin(eta)*sin(theta) ,\n -cos(eta)*cos(phi)*sin(theta)-sin(eta)*cos(theta) ,\n -cos(eta)*sin(phi) ],\n [ sin(eta)*cos(phi)*cos(theta)+cos(eta)*sin(theta) ,\n -sin(eta)*cos(phi)*sin(theta)+cos(eta)*cos(theta) ,\n sin(eta)*sin(phi) ],\n [ -sin(phi)*cos(theta) ,\n sin(phi)*sin(theta) ,\n cos(phi) ]]\n \n return rotation_elements",
"def rotate_z(angle):\n log.dev(\"lib.mathp.rotate_z is deprecated. Use lib.rotation.R3 instead.\")\n\n cosA = np.cos(angle)\n sinA = np.sin(angle)\n R = np.array([[cosA, sinA, 0], [-sinA, cosA, 0], [0, 0, 1]])\n return R",
"def cal_rotation_matrix(nvec=None, dest=[0., 0., 1]):\n import numpy.linalg as lag\n import math\n # rotation axis\n dest = np.asarray(dest) # towards +z direction.\n\n if lag.norm(nvec) != 1.0:\n nvec = nvec / lag.norm(nvec)\n if lag.norm(dest) != 1.0:\n dest = dest / lag.norm(dest)\n\n print(nvec, dest)\n r_axis = np.cross(nvec, dest)\n angle = math.acos(np.dot(nvec, dest))\n\n return _get_rotation_matrix(r_axis, angle)"
] | [
"0.68653584",
"0.6794344",
"0.6746414",
"0.66585994",
"0.6635431",
"0.6586524",
"0.6530783",
"0.651046",
"0.64896053",
"0.6478563",
"0.6458148",
"0.6396228",
"0.6392338",
"0.6383424",
"0.6383424",
"0.6374706",
"0.6374706",
"0.6374706",
"0.6250182",
"0.62475044",
"0.62475044",
"0.6238493",
"0.6193938",
"0.61859035",
"0.61835366",
"0.6173925",
"0.6147698",
"0.611363",
"0.6072117",
"0.6057054"
] | 0.71370095 | 0 |
Create [entries] numpy Y rotation matrices for [entries] angles. | def multi_rot_Y(angle_rads: numpy.ndarray) -> numpy.ndarray:
ry = numpy.empty((angle_rads.shape[0], 4, 4))
ry[...] = numpy.identity(4)
ry[:, 0, 0] = ry[:, 2, 2] = numpy.cos(angle_rads)
ry[:, 0, 2] = numpy.sin(angle_rads)
ry[:, 2, 0] = -ry[:, 0, 2]
return ry | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = angles[0:3]\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def rotation_matrices_from_angles(angles):\n\n angles = np.atleast_1d(angles)\n npts = len(angles)\n\n sina = np.sin(angles)\n cosa = np.cos(angles)\n\n R = np.zeros((npts, 2, 2))\n R[:, 0, 0] = cosa\n R[:, 1, 1] = cosa\n\n R[:, 0, 1] = -sina\n R[:, 1, 0] = sina\n\n return R",
"def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def _euler_angles_to_rotation_matrix(theta):\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R",
"def rotation3D_y(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])",
"def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n [np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()",
"def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))",
"def y_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[cos_t, 0, sin_t],\n [0, 1, 0],\n [-sin_t, 0, cos_t]])",
"def create_azimuthal_polarization(dim, rotation):\n theta_array = np.zeros((dim, dim))\n\n for i in range(np.size(theta_array, 0)):\n for j in range(np.size(theta_array, 1)):\n x = -dim / 2 + i\n y = -dim / 2 + j\n # perform roation\n th = math.pi*rotation/180.0\n x = np.cos(th)*x - np.sin(th)*y\n y = np.sin(th)*x + np.cos(th)*y\n\n rot = math.atan2(x, y) + math.pi/2\n # factor = (rot % (2*math.pi))\n theta_array[i][j] = (rot % (2 * math.pi))\n return theta_array",
"def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot",
"def homog_rot_mtx(angle_rads: float, axis: str) -> numpy.array:\n cosang = numpy.cos(angle_rads)\n sinang = numpy.sin(angle_rads)\n\n if \"z\" == axis:\n return numpy.array(\n (\n (cosang, -sinang, 0, 0),\n (sinang, cosang, 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n elif \"y\" == axis:\n return numpy.array(\n (\n (cosang, 0, sinang, 0),\n (0, 1, 0, 0),\n (-sinang, 0, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n else:\n return numpy.array(\n (\n (1, 0, 0, 0),\n (0, cosang, -sinang, 0),\n (0, sinang, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )",
"def eulerAnglesToRotationMatrix(theta):\n\n R_x = np.array([[1, 0, 0 ],\n [0, np.cos(theta[0]), -np.sin(theta[0]) ],\n [0, np.sin(theta[0]), np.cos(theta[0]) ]\n ])\n R_y = np.array([[np.cos(theta[1]), 0, np.sin(theta[1]) ],\n [0, 1, 0 ],\n [-np.sin(theta[1]), 0, np.cos(theta[1]) ]\n ])\n R_z = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],\n [np.sin(theta[2]), np.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n R = np.dot(R_z, np.dot( R_y, R_x ))\n return R",
"def multi_rot_Z(angle_rads: numpy.ndarray) -> numpy.ndarray:\n rz = numpy.empty((angle_rads.shape[0], 4, 4))\n rz[...] = numpy.identity(4)\n rz[:, 0, 0] = rz[:, 1, 1] = numpy.cos(angle_rads)\n rz[:, 1, 0] = numpy.sin(angle_rads)\n rz[:, 0, 1] = -rz[:, 1, 0]\n return rz",
"def to_angles(self) -> np.ndarray:\n phi = np.arctan2(self.A[1, 2], self.A[2, 2]) # Roll Angle\n theta = -np.sin(self.A[0, 2]) # Pitch Angle\n psi = np.arctan2(self.A[0, 1], self.A[0, 0]) # Yaw Angle\n return np.array([phi, theta, psi])",
"def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)",
"def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)",
"def _calc_rotation_matrix(self, inds=None):\n if inds is None:\n inds = range(self.Ncomponents)\n\n n_inds = len(inds)\n\n lon, lat = self.get_lon_lat()\n # Find mathematical points and vectors for RA/Dec\n theta_frame = np.pi / 2.0 - lat.rad[inds]\n phi_frame = lon.rad[inds]\n frame_vec = sct.r_hat(theta_frame, phi_frame)\n assert frame_vec.shape == (3, n_inds)\n\n # Find mathematical points and vectors for Alt/Az\n theta_altaz = np.pi / 2.0 - self.alt_az[0, inds]\n phi_altaz = self.alt_az[1, inds]\n altaz_vec = sct.r_hat(theta_altaz, phi_altaz)\n assert altaz_vec.shape == (3, n_inds)\n\n R_avg = self._calc_average_rotation_matrix()\n\n R_exact = np.zeros((3, 3, n_inds), dtype=np.float64)\n\n for src_i in range(n_inds):\n intermediate_vec = np.matmul(R_avg, frame_vec[:, src_i])\n\n R_perturb = sct.vecs2rot(r1=intermediate_vec, r2=altaz_vec[:, src_i])\n\n R_exact[:, :, src_i] = np.matmul(R_perturb, R_avg)\n\n return R_exact",
"def make_sample_rot_matrix(self, angles):\n (phi, kappa, omega) = self.get_phi_kappa_omega(angles)\n return numpy_utils.kappa_rotation_matrix(phi, np.deg2rad(self.alpha), kappa, omega)",
"def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation",
"def rotateEuler(axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])",
"def do_rotation(x_span, y_span, angle):\n\n radians = angle * np.pi / 180\n\n rotation_matrix = np.array(\n [[np.cos(radians), np.sin(radians)], [-np.sin(radians), np.cos(radians)]]\n )\n\n xx, yy = np.meshgrid(x_span, y_span, indexing=\"ij\")\n return np.einsum(\"ji, mni -> jmn\", rotation_matrix, np.dstack([xx, yy]))",
"def make_rotations(dataset, labels, angles):\n was_flattened = (len(dataset[0].shape) == 1)\n augmented_dataset = []\n augmented_labels = []\n \n for image, label in zip(dataset, labels):\n if was_flattened:\n image = unflatten(image)\n\n for angle in angles:\n rotated_pos = ndimage.rotate(image, angle)\n rotated_neg = ndimage.rotate(image, -angle)\n\n if was_flattened:\n rotated_pos = rotated_pos.flatten()\n rotated_neg = rotated_neg.flatten()\n\n augmented_dataset.append(rotated_pos)\n augmented_dataset.append(rotated_neg)\n augmented_labels.append(label)\n augmented_labels.append(label)\n \n return (augmented_dataset, augmented_labels)",
"def rotation_matrix(angle) -> np.array:\n return np.array([\n [np.cos(angle), np.sin(angle)],\n [-np.sin(angle), np.cos(angle)]])",
"def matrix_rotate_3d_y(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_y = -deg * pi/180\n c_y = cos(rad_y)\n s_y = sin(rad_y)\n return np.matrix([[c_y, 0, s_y], [0, 1, 0], [-s_y, 0, c_y]])"
] | [
"0.6704496",
"0.66379946",
"0.6614366",
"0.6525721",
"0.6497145",
"0.6497145",
"0.6497145",
"0.6494217",
"0.6494217",
"0.63507354",
"0.63133836",
"0.6302495",
"0.62334675",
"0.61759055",
"0.61702144",
"0.6167026",
"0.6162391",
"0.6145289",
"0.6126853",
"0.6108908",
"0.60886574",
"0.60886574",
"0.6087622",
"0.6064755",
"0.5979532",
"0.5956811",
"0.5941814",
"0.59266675",
"0.5897126",
"0.5890746"
] | 0.6921952 | 0 |
2 ways of defining set | def define_set():
set_1 = set([1, 2, 3])
print type(set_1)
print set_1
set_2 = {2, 3, 2}
print type(set_2)
# <type 'set'>
print set_2
# set([2, 3])
a = set((1, 2, 3, 4))
b = set([3, 4, 5, 6])
print a | b # Union
# {1, 2, 3, 4, 5, 6}
print a & b # Intersection
# {3, 4}
print a < b # Subset
# False
print a - b # Difference
# {1, 2}
print a ^ b # Symmetric Difference
# {1, 2, 5, 6} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set():",
"def set():\n pass",
"def set(x):\n pass",
"def test_set_creation():\r\n test_list = [1, 2, 1, 3] # it is alist\r\n set1 = set(test_list)\r\n assert {1,2,3} == set1 # randomly arranged and don't allow duplicates\r\n\r\n test_string = \"apple\"\r\n set2 = set(test_string)\r\n assert {'a','p','l','e'} == set2\r\n\r\n test_dict = { 1: \"one\", 2 : \"two\"}\r\n set3 = set(test_dict)\r\n assert {1,2} == set3 #only keys are considered\r\n\r\n set4 = set(test_dict.values())\r\n assert {\"one\",\"two\"} == set4\r\n\r\n set5 = set(test_dict.items())\r\n assert set([(2, 'two'), (1, 'one')]) == set5 ######################DOUBT\r",
"def getSet(unique_name):",
"def getSet(unique_name):",
"def set(self) -> set:\n return set(self)",
"def test_set(self):\n a = set()\n a.add('b')\n a.add('c')\n a.add('a')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'b', 'c'])\n a.remove('b')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'c'])\n\n a.discard('d')\n\n b = set(['r', 's'])\n d = a.union(b)\n b = list(d)\n b.sort()\n self.assertEqual(b, ['a', 'c', 'r', 's'])",
"def Set(self) -> None:",
"def __init__(self):\n self.set = set()",
"def SetFunction():\r\n s2 = []\r\n s3 = []\r\n s4 = []\r\n s2 = { i for i in range(21) if i%2 == 0}\r\n s3 = { i for i in range(21) if i%3 == 0}\r\n s4 = { i for i in range(21) if i%4 == 0}\r\n s2 = set(s2)\r\n s3 = set(s3)\r\n s4 = set(s4)\r\n print s3.issubset(s2)\r\n print s4.issubset(s2)",
"def fixSets(namespace):\n\ttry:\n\t\tset\n\texcept:\n\t\timport sets\n\t\tnamespace[\"set\"] = sets.Set\n\t\tnamespace[\"frozenset\"] = sets.ImmutableSet",
"def getSets():",
"def set_of(element: Type) -> SetType:\n return SetType(element)",
"def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s",
"def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s",
"def getSets(unique_name=None):",
"def __init__(self):\n self.s = set()",
"def add_sets(*args):\n out = set()\n for arg in args:\n for thing in arg:\n out.add(thing)\n return out",
"def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)",
"def __init__(self):\n self.EntireSet = []",
"def __init__(self, set):\n Rule.__init__(self)\n self.__set = set",
"def __init__(self, set):\n Rule.__init__(self)\n self.__set = set",
"def mkset(item):\n if isinstance(item, set):\n return item\n elif item is None:\n return set()\n elif isIterable(item):\n return set(item)\n else:\n return set([item])",
"def get_from_set(set_):\n for e in set_: return e",
"def create_C1(data_set):\r\n C1 = set()\r\n for t in data_set:\r\n for item in t:\r\n item_set = frozenset([item])\r\n C1.add(item_set)\r\n return C1",
"def create_C1(data_set):\n C1 = set()\n for t in data_set:\n for item in t:\n item_set = frozenset([item])\n C1.add(item_set)\n return C1",
"def make_set(node):\n node.parent = node\n node.rank = 0",
"def __init__(self,s={}) -> None:\n\n self.set=list()",
"def strict(cls):\n return frozenset()"
] | [
"0.80757755",
"0.7471948",
"0.7425762",
"0.70587444",
"0.6936791",
"0.6936791",
"0.6876868",
"0.6673173",
"0.6640021",
"0.663485",
"0.6614221",
"0.6581556",
"0.65792",
"0.6541001",
"0.6405655",
"0.63981545",
"0.6355021",
"0.6317065",
"0.63162047",
"0.6313064",
"0.62325305",
"0.62295485",
"0.62295485",
"0.6204737",
"0.6196658",
"0.61963344",
"0.61853707",
"0.6142419",
"0.61385113",
"0.61082596"
] | 0.7948503 | 1 |
Tests that the process method invokes the correct GRR API calls. | def testProcess(self):
self.grr_hunt_file_collector.PreProcess()
self.grr_hunt_file_collector.Process()
# extract call kwargs
call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]
self.assertEqual(call_kwargs['flow_args'].paths,
['/etc/passwd', '/etc/shadow', '/etc/hosts'])
self.assertEqual(call_kwargs['flow_args'].action.action_type,
flows_pb2.FileFinderAction.DOWNLOAD)
self.assertEqual(call_kwargs['flow_name'], 'FileFinder')
self.assertEqual(call_kwargs['hunt_runner_args'].description,
'random reason') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def process():\n pass",
"def testProcess(self):\n self.grr_hunt_artifact_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].artifact_list,\n ['RandomArtifact'])\n self.assertEqual(call_kwargs['flow_args'].use_raw_filesystem_access, True)\n self.assertEqual(call_kwargs['flow_name'], 'ArtifactCollectorFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def testProcess(self, mock_get_write_results):\n self.mock_grr_api.Hunt.return_value.Get.return_value = \\\n mock_grr_hosts.MOCK_HUNT\n self.grr_hunt_downloader.Process()\n mock_get_write_results.assert_called_with(mock_grr_hosts.MOCK_HUNT,\n '/tmp/test')",
"def process():",
"def test_can_process(self):\n self.assertTrue(self.adapter.can_process(''))",
"def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)",
"def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)",
"def test_runs_given_function(self):\n from furious.processors import _handle_results\n\n processor = Mock()\n\n _handle_results({'_process_results': processor})\n\n processor.assert_called_once_with()",
"def test(self):\n \"\"\"WARNING: IT IS HIGHLY RECOMMENDED TO HAVE ONE TEST ONLY TO ISOLATE FUNCTIONAL TESTS FROM EACH OTHER. i.e. \n Start a new Python Interpreter and JVM for each test. In the end, it means only one test in this class. \"\"\"\n \n logger.info('**Starting test**')\n q = Queue()\n\n p = Process(target=self.client_process1, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n logger.debug(\"Restarting dataClay\")\n self.mock.mock.restartDataClay()\n p = Process(target=self.client_process2, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n\n logger.info(\"** Test OK!\")",
"def test_block_builtin_processes_from_api(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\"],\n \"inputs\": {\n \"stringInput\": \"string\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": \"python:3.7-alpine\"\n },\n },\n \"outputs\": [],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"type\": PROCESS_BUILTIN,\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n resp = mocked_sub_requests(self.app, \"post_json\", \"/processes\", data=body, timeout=5,\n headers=self.json_headers, only_local=True, expect_errors=True)\n # With Weaver<=4.1.x, the 'type' was explicitly checked to block it since Deploy payload was kept as is\n # This field was allowed to trickle all they way down to the instantiation of Process object\n # assert resp.status_code == 200\n\n # With Weaver>4.1.x, the deserialized result from Deploy payload is employed, which drops unknown 'type'\n # Ensure that deploy now succeeds, but the obtained Process is not 'builtin' (just a regular application)\n assert resp.status_code == 201\n assert PROCESS_BUILTIN not in resp.json[\"processSummary\"][\"keywords\"]\n process = self.process_store.fetch_by_id(self._testMethodName)\n assert process.type == PROCESS_APPLICATION",
"def test_get_run(self):\n pass",
"def test_valid_process(client, monkeypatch):\n parsed_message = None\n wiki_result = None\n gmap_result = None\n\n valid_answer = (\n \"Ah oui, je vois tout à fait ce dont tu veux parler. L'adresse est \"\n )\n error_answer = \"Désolé mon grand, mais je ne comprends pas ta demande... \"\n\n wiki_response = {\n 'summary': 'Tour de fer puddlé de 324 mètres de hauteur',\n 'url': 'https://fr.wikipedia.org/wiki/Tour_Eiffel',\n 'title': 'Tour Eiffel'\n }\n\n gmap_response = [\n {\n \"formatted_address\": (\n \"Champ de Mars, 5 Avenue Anatole France, 75007 Paris, France\"\n ),\n \"geometry\": {\n \"location\": {\n \"lat\": 48.85837009999999,\n \"lng\": 2.2944813\n }\n }\n }\n ]\n\n def mock_wiki(string):\n return wiki_response\n\n def mock_gmap(string):\n return gmap_response\n\n # Monkeypatch WikiAPI\n monkeypatch.setattr(wiki, 'get_search_result', mock_wiki)\n # Monkeypatch GmapsAPI\n monkeypatch.setattr(gmap, 'get_informations', mock_gmap)\n\n parsed_message = ' '.join(parser.get_keywords(get_message))\n assert parsed_message == 'tour eiffel'\n\n wiki_result = wiki.get_search_result(parsed_message)\n assert not wiki_result == 'error'\n\n gmap_result = gmap.get_coordinates(parsed_message)\n assert not gmap_result == 'error'\n\n if wiki_result == 'error':\n first_message = error_answer\n second_message = None\n coord = None\n url = None\n title = None\n else:\n coord = gmap_result['coord']\n addr = gmap_result['addr']\n first_message = valid_answer + addr\n second_message = wiki_result['summary']\n url = wiki_result['url']\n title = wiki_result['title']\n\n res = {\n \"first_message\": first_message,\n \"second_message\": second_message,\n \"gmap_coord\": coord,\n \"url\": url,\n \"title\": title\n }\n\n assert not res[\"first_message\"] == error_answer\n assert res[\"first_message\"] == (\n valid_answer + gmap_response[0][\"formatted_address\"]\n )\n\n assert not res[\"second_message\"] is None\n assert res[\"second_message\"] == wiki_response[\"summary\"]\n\n assert not res[\"gmap_coord\"] is None\n assert res[\"gmap_coord\"] == gmap_response[0][\"geometry\"][\"location\"]\n\n assert not res[\"url\"] is None\n assert res[\"url\"] == wiki_response[\"url\"]\n\n assert not res[\"title\"] is None\n assert res[\"title\"] == wiki_response[\"title\"]",
"def test_rpcCall(self):\n pass",
"def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True",
"def test_process_data(self):\n pass",
"def process(self):\n pass",
"def test_start_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_process\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_process('strategy', 'appli:proc')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'running.return_value': False}),\n Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': False,\n 'stopped.return_value': True,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_start.return_value = False\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call no wait and done\n mocked_start.return_value = True\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and done\n result = rpc.start_process(2, 'appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, 
mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_process(2, 'appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still stopped\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process running\n proc_1.stopped.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)",
"def test_addProcess(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.protocols, {})\r\n self.assertEqual(self.pm.processes,\r\n {\"foo\": ([\"arg1\", \"arg2\"], 1, 2, {})})\r\n self.pm.startService()\r\n self.reactor.advance(0)\r\n self.assertEqual(self.pm.protocols.keys(), [\"foo\"])",
"def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)",
"def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())",
"def test_process(self):\n xknx = XKNX(loop=self.loop)\n sensor = Sensor(\n xknx,\n 'TestSensor',\n value_type='temperature',\n group_address_state='1/2/3')\n\n telegram = Telegram(GroupAddress('1/2/3'))\n telegram.payload = DPTArray((0x06, 0xa0))\n self.loop.run_until_complete(asyncio.Task(sensor.process(telegram)))\n self.assertEqual(sensor.sensor_value.payload, DPTArray((0x06, 0xa0)))\n self.assertEqual(sensor.resolve_state(), 16.96)",
"def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)",
"def test_BLINK_LAUNCH_PROCESS(self):\n self.verify_references_to_prerequisites(processes.BLINK_LAUNCH_PROCESS)",
"def process(self):",
"def process(self):",
"def process(self):",
"def test_process_task_with_data(self, mock_process, mock_fetch):\n mock_fetch.return_value = self.url_task\n\n TaskService.process_task_with_data(self.url_task.id, 'fake_data')\n mock_process.assert_called_once_with('fake_data')",
"def process(self):\n raise NotImplementedError",
"def test_runs_returned_async(self):\n from furious.async import Async\n from furious.processors import _handle_results\n\n processor = Mock()\n processor.return_value = Mock(spec=Async)\n\n _handle_results({'_process_results': processor})\n\n processor.return_value.start.assert_called_once_with()"
] | [
"0.7084944",
"0.6597489",
"0.6498858",
"0.6480054",
"0.6472296",
"0.63703793",
"0.63314945",
"0.6231056",
"0.621261",
"0.6134163",
"0.6015268",
"0.6010355",
"0.59589803",
"0.5906601",
"0.5904283",
"0.5899668",
"0.5875687",
"0.5854556",
"0.58414304",
"0.58292466",
"0.5740823",
"0.56908625",
"0.5686187",
"0.56753564",
"0.5643833",
"0.5643833",
"0.5643833",
"0.5629414",
"0.55969197",
"0.5576554"
] | 0.66623306 | 1 |
Tests that the process method invokes the correct GRR API calls. | def testProcess(self):
self.grr_hunt_osquery_collector.Process()
# extract call kwargs
call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]
self.assertEqual(call_kwargs['flow_args'].query,
'SELECT * FROM processes')
self.assertEqual(call_kwargs['flow_args'].timeout_millis,
300000)
self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)
self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')
self.assertEqual(call_kwargs['hunt_runner_args'].description,
'random reason') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testProcess(self):\n self.grr_hunt_file_collector.PreProcess()\n self.grr_hunt_file_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].paths,\n ['/etc/passwd', '/etc/shadow', '/etc/hosts'])\n self.assertEqual(call_kwargs['flow_args'].action.action_type,\n flows_pb2.FileFinderAction.DOWNLOAD)\n self.assertEqual(call_kwargs['flow_name'], 'FileFinder')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def process():\n pass",
"def testProcess(self):\n self.grr_hunt_artifact_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].artifact_list,\n ['RandomArtifact'])\n self.assertEqual(call_kwargs['flow_args'].use_raw_filesystem_access, True)\n self.assertEqual(call_kwargs['flow_name'], 'ArtifactCollectorFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def testProcess(self, mock_get_write_results):\n self.mock_grr_api.Hunt.return_value.Get.return_value = \\\n mock_grr_hosts.MOCK_HUNT\n self.grr_hunt_downloader.Process()\n mock_get_write_results.assert_called_with(mock_grr_hosts.MOCK_HUNT,\n '/tmp/test')",
"def process():",
"def test_can_process(self):\n self.assertTrue(self.adapter.can_process(''))",
"def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)",
"def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)",
"def test_runs_given_function(self):\n from furious.processors import _handle_results\n\n processor = Mock()\n\n _handle_results({'_process_results': processor})\n\n processor.assert_called_once_with()",
"def test(self):\n \"\"\"WARNING: IT IS HIGHLY RECOMMENDED TO HAVE ONE TEST ONLY TO ISOLATE FUNCTIONAL TESTS FROM EACH OTHER. i.e. \n Start a new Python Interpreter and JVM for each test. In the end, it means only one test in this class. \"\"\"\n \n logger.info('**Starting test**')\n q = Queue()\n\n p = Process(target=self.client_process1, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n logger.debug(\"Restarting dataClay\")\n self.mock.mock.restartDataClay()\n p = Process(target=self.client_process2, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n\n logger.info(\"** Test OK!\")",
"def test_block_builtin_processes_from_api(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\"],\n \"inputs\": {\n \"stringInput\": \"string\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": \"python:3.7-alpine\"\n },\n },\n \"outputs\": [],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"type\": PROCESS_BUILTIN,\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n resp = mocked_sub_requests(self.app, \"post_json\", \"/processes\", data=body, timeout=5,\n headers=self.json_headers, only_local=True, expect_errors=True)\n # With Weaver<=4.1.x, the 'type' was explicitly checked to block it since Deploy payload was kept as is\n # This field was allowed to trickle all they way down to the instantiation of Process object\n # assert resp.status_code == 200\n\n # With Weaver>4.1.x, the deserialized result from Deploy payload is employed, which drops unknown 'type'\n # Ensure that deploy now succeeds, but the obtained Process is not 'builtin' (just a regular application)\n assert resp.status_code == 201\n assert PROCESS_BUILTIN not in resp.json[\"processSummary\"][\"keywords\"]\n process = self.process_store.fetch_by_id(self._testMethodName)\n assert process.type == PROCESS_APPLICATION",
"def test_get_run(self):\n pass",
"def test_valid_process(client, monkeypatch):\n parsed_message = None\n wiki_result = None\n gmap_result = None\n\n valid_answer = (\n \"Ah oui, je vois tout à fait ce dont tu veux parler. L'adresse est \"\n )\n error_answer = \"Désolé mon grand, mais je ne comprends pas ta demande... \"\n\n wiki_response = {\n 'summary': 'Tour de fer puddlé de 324 mètres de hauteur',\n 'url': 'https://fr.wikipedia.org/wiki/Tour_Eiffel',\n 'title': 'Tour Eiffel'\n }\n\n gmap_response = [\n {\n \"formatted_address\": (\n \"Champ de Mars, 5 Avenue Anatole France, 75007 Paris, France\"\n ),\n \"geometry\": {\n \"location\": {\n \"lat\": 48.85837009999999,\n \"lng\": 2.2944813\n }\n }\n }\n ]\n\n def mock_wiki(string):\n return wiki_response\n\n def mock_gmap(string):\n return gmap_response\n\n # Monkeypatch WikiAPI\n monkeypatch.setattr(wiki, 'get_search_result', mock_wiki)\n # Monkeypatch GmapsAPI\n monkeypatch.setattr(gmap, 'get_informations', mock_gmap)\n\n parsed_message = ' '.join(parser.get_keywords(get_message))\n assert parsed_message == 'tour eiffel'\n\n wiki_result = wiki.get_search_result(parsed_message)\n assert not wiki_result == 'error'\n\n gmap_result = gmap.get_coordinates(parsed_message)\n assert not gmap_result == 'error'\n\n if wiki_result == 'error':\n first_message = error_answer\n second_message = None\n coord = None\n url = None\n title = None\n else:\n coord = gmap_result['coord']\n addr = gmap_result['addr']\n first_message = valid_answer + addr\n second_message = wiki_result['summary']\n url = wiki_result['url']\n title = wiki_result['title']\n\n res = {\n \"first_message\": first_message,\n \"second_message\": second_message,\n \"gmap_coord\": coord,\n \"url\": url,\n \"title\": title\n }\n\n assert not res[\"first_message\"] == error_answer\n assert res[\"first_message\"] == (\n valid_answer + gmap_response[0][\"formatted_address\"]\n )\n\n assert not res[\"second_message\"] is None\n assert res[\"second_message\"] == wiki_response[\"summary\"]\n\n assert not res[\"gmap_coord\"] is None\n assert res[\"gmap_coord\"] == gmap_response[0][\"geometry\"][\"location\"]\n\n assert not res[\"url\"] is None\n assert res[\"url\"] == wiki_response[\"url\"]\n\n assert not res[\"title\"] is None\n assert res[\"title\"] == wiki_response[\"title\"]",
"def test_rpcCall(self):\n pass",
"def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True",
"def test_process_data(self):\n pass",
"def process(self):\n pass",
"def test_start_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_process\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_process('strategy', 'appli:proc')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'running.return_value': False}),\n Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': False,\n 'stopped.return_value': True,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_start.return_value = False\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call no wait and done\n mocked_start.return_value = True\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and done\n result = rpc.start_process(2, 'appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, 
mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_process(2, 'appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still stopped\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process running\n proc_1.stopped.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)",
"def test_addProcess(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.protocols, {})\r\n self.assertEqual(self.pm.processes,\r\n {\"foo\": ([\"arg1\", \"arg2\"], 1, 2, {})})\r\n self.pm.startService()\r\n self.reactor.advance(0)\r\n self.assertEqual(self.pm.protocols.keys(), [\"foo\"])",
"def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)",
"def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())",
"def test_process(self):\n xknx = XKNX(loop=self.loop)\n sensor = Sensor(\n xknx,\n 'TestSensor',\n value_type='temperature',\n group_address_state='1/2/3')\n\n telegram = Telegram(GroupAddress('1/2/3'))\n telegram.payload = DPTArray((0x06, 0xa0))\n self.loop.run_until_complete(asyncio.Task(sensor.process(telegram)))\n self.assertEqual(sensor.sensor_value.payload, DPTArray((0x06, 0xa0)))\n self.assertEqual(sensor.resolve_state(), 16.96)",
"def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)",
"def test_BLINK_LAUNCH_PROCESS(self):\n self.verify_references_to_prerequisites(processes.BLINK_LAUNCH_PROCESS)",
"def process(self):",
"def process(self):",
"def process(self):",
"def test_process_task_with_data(self, mock_process, mock_fetch):\n mock_fetch.return_value = self.url_task\n\n TaskService.process_task_with_data(self.url_task.id, 'fake_data')\n mock_process.assert_called_once_with('fake_data')",
"def process(self):\n raise NotImplementedError",
"def test_runs_returned_async(self):\n from furious.async import Async\n from furious.processors import _handle_results\n\n processor = Mock()\n processor.return_value = Mock(spec=Async)\n\n _handle_results({'_process_results': processor})\n\n processor.return_value.start.assert_called_once_with()"
] | [
"0.66623306",
"0.6597489",
"0.6498858",
"0.6480054",
"0.6472296",
"0.63703793",
"0.63314945",
"0.6231056",
"0.621261",
"0.6134163",
"0.6015268",
"0.6010355",
"0.59589803",
"0.5906601",
"0.5904283",
"0.5899668",
"0.5875687",
"0.5854556",
"0.58414304",
"0.58292466",
"0.5740823",
"0.56908625",
"0.5686187",
"0.56753564",
"0.5643833",
"0.5643833",
"0.5643833",
"0.5629414",
"0.55969197",
"0.5576554"
] | 0.7084944 | 0 |
Tests that hunt results are correctly extracted. | def testExtractHuntResults(self, _, mock_remove):
self.grr_hunt_downloader.output_path = '/directory'
expected = sorted([
('greendale-student04.c.greendale.internal',
'/directory/hunt_H_A43ABF9D/C.4c4223a2ea9cf6f1'),
('greendale-admin.c.greendale.internal',
'/directory/hunt_H_A43ABF9D/C.ba6b63df5d330589'),
('greendale-student05.c.greendale.internal',
'/directory/hunt_H_A43ABF9D/C.fc693a148af801d5')
])
test_zip = 'tests/lib/collectors/test_data/hunt.zip'
# pylint: disable=protected-access
result = sorted(self.grr_hunt_downloader._ExtractHuntResults(test_zip))
self.assertEqual(result, expected)
mock_remove.assert_called_with('tests/lib/collectors/test_data/hunt.zip') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testCollectHuntResults(self,\n mock_get_write_archive,\n mock_ExtractHuntResults):\n self.mock_grr_api.Hunt.return_value.Get.return_value = \\\n mock_grr_hosts.MOCK_HUNT\n self.grr_hunt_downloader.Process()\n mock_get_write_archive.assert_called_with(mock_grr_hosts.MOCK_HUNT,\n '/tmp/test/H:12345.zip')\n mock_ExtractHuntResults.assert_called_with('/tmp/test/H:12345.zip')",
"def test_get_results(self):\n pass",
"def testGenericHuntWithoutOutputPlugins(self):\n hunt = hunts.GRRHunt.StartHunt(\n \"GenericHunt\",\n flow_name=\"GetFile\",\n args=rdfvalue.Dict(\n pathspec=rdfvalue.PathSpec(\n path=\"/tmp/evil.txt\",\n pathtype=rdfvalue.PathSpec.PathType.OS,\n )\n ),\n output_plugins=[],\n token=self.token)\n\n regex_rule = rdfvalue.ForemanAttributeRegex(\n attribute_name=\"GRR client\",\n attribute_regex=\"GRR\")\n hunt.AddRule([regex_rule])\n hunt.Run()\n\n # Pretend to be the foreman now and dish out hunting jobs to all the\n # client..\n foreman = aff4.FACTORY.Open(\"aff4:/foreman\", mode=\"rw\", token=self.token)\n for client_id in self.client_ids:\n foreman.AssignTasksToClient(client_id)\n\n # Run the hunt.\n client_mock = test_lib.SampleHuntMock()\n test_lib.TestHuntHelper(client_mock, self.client_ids,\n check_flow_errors=False, token=self.token)\n\n # Stop the hunt now.\n hunt.Stop()\n hunt.Save()\n\n hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,\n token=self.token)\n\n started = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.CLIENTS)\n finished = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.FINISHED)\n errors = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.ERRORS)\n\n self.assertEqual(len(set(started)), 10)\n self.assertEqual(len(set(finished)), 10)\n self.assertEqual(len(set(errors)), 5)\n\n # We shouldn't receive any entries as no output plugin was specified.\n self.assertRaises(IOError, aff4.FACTORY.Open,\n hunt.session_id.Add(\"Results\"),\n \"RDFValueCollection\", \"r\", False, self.token)",
"def getTestResults():",
"def test_get_results_verbose(self):\n\t\tpass",
"def test_tarballs_not_extracted(self) -> None:\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n # Add defaults to specify tarball_dir.\n with open(os.path.join(tech_dir, \"defaults.json\"), \"w\") as f:\n f.write(json.dumps({\n \"technology.dummy28.tarball_dir\": tech_dir\n }, cls=HammerJSONEncoder))\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, self.add_tarballs)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n database = hammer_config.HammerDatabase()\n database.update_technology(tech.get_config())\n HammerVLSISettings.load_builtins_and_core(database)\n tech.set_database(database)\n outputs = tech.process_library_filter(pre_filts=[], filt=hammer_tech.filters.gds_filter,\n must_exist=False,\n output_func=lambda str, _: [str])\n\n self.assertEqual(outputs, [\"{0}/extracted/foobar.tar.gz/test.gds\".format(tech_dir)])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)",
"def _collect_test_result(duthost, ptfhost, request):\n logger.info(\"Collecting test result and related information.\")\n # TODO : collect DUT test report\n _collect_sonic_os_and_platform_info(duthost, request)\n _collect_sai_test_report_xml(ptfhost, request)",
"def test_extract_recipe_from_website(self):\n pass",
"def testGenericHunt(self):\n hunt = hunts.GRRHunt.StartHunt(\n \"GenericHunt\",\n flow_name=\"GetFile\",\n args=rdfvalue.Dict(\n pathspec=rdfvalue.PathSpec(\n path=\"/tmp/evil.txt\", pathtype=rdfvalue.PathSpec.PathType.OS)),\n token=self.token)\n\n regex_rule = rdfvalue.ForemanAttributeRegex(\n attribute_name=\"GRR client\",\n attribute_regex=\"GRR\")\n hunt.AddRule([regex_rule])\n hunt.Run()\n\n # Pretend to be the foreman now and dish out hunting jobs to all the\n # clients..\n foreman = aff4.FACTORY.Open(\"aff4:/foreman\", mode=\"rw\", token=self.token)\n for client_id in self.client_ids:\n foreman.AssignTasksToClient(client_id)\n\n # Run the hunt.\n client_mock = test_lib.SampleHuntMock()\n test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)\n\n # Stop the hunt now.\n hunt.Stop()\n hunt.Save()\n\n hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,\n token=self.token)\n\n started = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.CLIENTS)\n finished = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.FINISHED)\n errors = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.ERRORS)\n\n self.assertEqual(len(set(started)), 10)\n self.assertEqual(len(set(finished)), 10)\n self.assertEqual(len(set(errors)), 5)\n\n collection = aff4.FACTORY.Open(hunt.state.output_objects[0].collection.urn,\n mode=\"r\", token=self.token)\n\n # We should receive stat entries.\n i = 0\n for i, x in enumerate(collection):\n self.assertEqual(x.payload.__class__, rdfvalue.StatEntry)\n self.assertEqual(x.payload.aff4path.Split(2)[-1], \"fs/os/tmp/evil.txt\")\n\n self.assertEqual(i, 4)",
"def test_tarballs_pre_extracted(self) -> None:\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n # Add defaults to specify tarball_dir.\n with open(os.path.join(tech_dir, \"defaults.json\"), \"w\") as f:\n f.write(json.dumps({\n \"technology.dummy28.tarball_dir\": tech_dir,\n \"vlsi.technology.extracted_tarballs_dir\": tech_dir_base\n }, cls=HammerJSONEncoder))\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, self.add_tarballs)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n database = hammer_config.HammerDatabase()\n database.update_technology(tech.get_config())\n HammerVLSISettings.load_builtins_and_core(database)\n tech.set_database(database)\n outputs = tech.process_library_filter(pre_filts=[], filt=hammer_tech.filters.gds_filter,\n must_exist=False,\n output_func=lambda str, _: [str])\n\n self.assertEqual(outputs, [\"{0}/foobar.tar.gz/test.gds\".format(tech_dir_base)])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)",
"def test_get_analyzed_recipe_instructions(self):\n pass",
"def test_get_site_scans(self):\n pass",
"def testOSErrorExtractHuntResults(self, mock_extract, mock_remove):\n self.grr_hunt_downloader.output_path = '/directory'\n test_zip = 'tests/lib/collectors/test_data/hunt.zip'\n mock_extract.side_effect = OSError\n # pylint: disable=protected-access\n\n with self.assertRaises(errors.DFTimewolfError) as error:\n self.grr_hunt_downloader._ExtractHuntResults(test_zip)\n self.assertEqual(1, len(self.test_state.errors))\n self.assertEqual(\n error.exception.message,\n 'Error manipulating file tests/lib/collectors/test_data/hunt.zip: ')\n self.assertTrue(error.exception.critical)\n mock_remove.assert_not_called()",
"def test_workloads_list_command_human_readable(\n workloads_list_success, workloads_list_success_hr\n):\n hr_output = prepare_workloads_list_output(workloads_list_success)\n assert hr_output == workloads_list_success_hr",
"def test_analyze_recipe_instructions(self):\n pass",
"def test_get_run(self):\n pass",
"def test_get_goals(self):\n pass",
"def test_analyze_a_recipe_search_query(self):\n pass",
"def test_get_stats(self):\n pass",
"def test_print_result(capsys):\n assert \"\"\"Total 5 hands solved\nTotal 4 hands solved with hint\nTotal 4 hands failed to solve\"\"\" in hl.test_help_print_result(capsys)",
"def assertResults(self, expected, result, deduped=False):\n self.assertEqual([u'shards'], result.keys())\n self.assertEqual(1, len(result[u'shards']))\n self.assertTrue(result[u'shards'][0], result)\n result = result[u'shards'][0].copy()\n self.assertFalse(result.get(u'abandoned_ts'))\n bot_version = result.pop(u'bot_version')\n self.assertTrue(bot_version)\n if result.get(u'costs_usd') is not None:\n expected.pop(u'costs_usd', None)\n self.assertLess(0, result.pop(u'costs_usd'))\n if result.get(u'cost_saved_usd') is not None:\n expected.pop(u'cost_saved_usd', None)\n self.assertLess(0, result.pop(u'cost_saved_usd'))\n self.assertTrue(result.pop(u'created_ts'))\n self.assertTrue(result.pop(u'completed_ts'))\n self.assertLess(0, result.pop(u'duration'))\n task_id = result.pop(u'task_id')\n run_id = result.pop(u'run_id')\n self.assertTrue(task_id)\n self.assertTrue(task_id.endswith('0'), task_id)\n if not deduped:\n self.assertEqual(task_id[:-1] + '1', run_id)\n self.assertTrue(result.pop(u'bot_idle_since_ts'))\n self.assertTrue(result.pop(u'modified_ts'))\n self.assertTrue(result.pop(u'started_ts'))\n\n if getattr(expected.get(u'output'), 'match', None):\n expected_output = expected.pop(u'output')\n output = result.pop('output')\n self.assertTrue(\n expected_output.match(output),\n '%s does not match %s' % (output, expected_output.pattern))\n\n # Bot python version may be different.\n result[u'bot_dimensions'] = sorted(\n [d for d in result[u'bot_dimensions'] if not d['key'] == 'python'])\n\n self.assertEqual(expected, result)\n return bot_version",
"def test_parse_hit_details(self):\n for query in self.result:\n first_hsp = self.result[query][0][0]\n self.assertEqual(first_hsp[\"SUBJECT_ID\"], \"gi|148670104|gb|EDL02051.1|\")\n self.assertEqual(\n first_hsp[\"HIT_DEF\"],\n \"insulin-like growth factor 2 receptor, isoform CRA_c [Mus musculus]\",\n )\n self.assertEqual(first_hsp[\"HIT_ACCESSION\"], \"2001\")\n self.assertEqual(first_hsp[\"HIT_LENGTH\"], 707)",
"def test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])",
"def test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])",
"def test_result():\n obs1 = response_stats('https://stackoverflow.com/questions/49010465/override-class-for-plugin-in-elasticsearch')\n exp1 = {\"Author's reputation score\": 283,\n 'Average reputation score': 1220,\n 'Number of responses': 0,\n 'Top reputation score': 2157}\n assert obs1 == exp1\n\n obs2 = response_stats('https://stackoverflow.com/questions/100003/what-are-metaclasses-in-python?rq=1')\n exp2 = {\"Author's reputation score\": 318000,\n 'Average reputation score': 37460,\n 'Number of responses': 16,\n 'Top reputation score': 318000}\n assert obs2 == exp2\n\n obs3 = response_stats('https://stackoverflow.com/questions/49206233/how-can-i-convert-this-date-03-01-2018-1200-am-to-2018-03-01-in-c')\n exp3 = {\"Author's reputation score\": 1,\n 'Average reputation score': 67133,\n 'Number of responses': 5,\n 'Top reputation score': 531000}\n assert obs3 == exp3\n\n obs4 = response_stats('https://stackoverflow.com/questions/49218523/highcharts-drilldown-json-from-php-mysql')\n exp4 = {\"Author's reputation score\": 76,\n 'Average reputation score': 76,\n 'Number of responses': 1,\n 'Top reputation score': 76}\n assert obs4 == exp4",
"def test_tarballs_pre_extracted_tech_specific(self) -> None:\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n # Add defaults to specify tarball_dir.\n with open(os.path.join(tech_dir, \"defaults.json\"), \"w\") as f:\n f.write(json.dumps({\n \"technology.dummy28.tarball_dir\": tech_dir,\n \"vlsi.technology.extracted_tarballs_dir\": \"/should/not/be/used\",\n \"technology.dummy28.extracted_tarballs_dir\": tech_dir_base\n }, cls=HammerJSONEncoder))\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, self.add_tarballs)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n database = hammer_config.HammerDatabase()\n database.update_technology(tech.get_config())\n HammerVLSISettings.load_builtins_and_core(database)\n tech.set_database(database)\n outputs = tech.process_library_filter(pre_filts=[], filt=hammer_tech.filters.gds_filter,\n must_exist=False,\n output_func=lambda str, _: [str])\n\n self.assertEqual(outputs, [\"{0}/foobar.tar.gz/test.gds\".format(tech_dir_base)])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)",
"def test_untar(self):",
"def testInitialization(self):\n self.assertEqual(self.grr_hunt_downloader.hunt_id, 'H:12345')",
"def extract_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"input\": {\"molecules\": [\"DDSPDLPK\"], \"score_threshold\": 0.95},\n \"output\": {\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"file_name\": \"BSA1.mzML\",\n \"scaling_factor\": 100,\n \"spec_id\": 1337,\n },\n }\n ]\n for test_dict in TESTS:\n for key, n, entry in self.results.extract_results(**test_dict[\"input\"]):\n print(key, entry)\n assert key.formula == test_dict[\"output\"][\"formula\"]\n assert key.file_name == test_dict[\"output\"][\"file_name\"]\n assert entry.scaling_factor == test_dict[\"output\"][\"scaling_factor\"]\n assert entry.spec_id == test_dict[\"output\"][\"spec_id\"]\n # print(self.results)\n # print(self.results.lookup)\n assert n == 0",
"def test_fixture_available_results(tmp_sample_project):\n config_dir = tmp_sample_project\n output = subprocess.run([\"smif\", \"available_results\", \"energy_central\", \"-d\", config_dir],\n stdout=subprocess.PIPE)\n\n out_str = str(output.stdout)\n assert(out_str.count('model run: energy_central') == 1)\n assert(out_str.count('sos model: energy') == 1)\n assert(out_str.count('sector model:') == 1)\n assert(out_str.count('output:') == 2)\n assert(out_str.count('output: cost') == 1)\n assert(out_str.count('output: water_demand') == 1)\n assert(out_str.count('no results') == 2)\n assert(out_str.count('decision') == 0)\n\n # Run energy_central and re-check output with optional flag for completed results\n subprocess.run([\"smif\", \"run\", \"energy_central\", \"-d\", config_dir], stdout=subprocess.PIPE)\n output = subprocess.run([\"smif\", \"available_results\", \"energy_central\", \"-d\", config_dir],\n stdout=subprocess.PIPE)\n\n out_str = str(output.stdout)\n assert(out_str.count('model run: energy_central') == 1)\n assert(out_str.count('sos model: energy') == 1)\n assert(out_str.count('sector model:') == 1)\n assert(out_str.count('output:') == 2)\n assert(out_str.count('output: cost') == 1)\n assert(out_str.count('output: water_demand') == 1)\n assert(out_str.count('no results') == 0)\n assert(out_str.count('decision') == 8)\n assert(out_str.count('decision 1') == 2)\n assert(out_str.count('decision 2') == 2)\n assert(out_str.count('decision 3') == 2)\n assert(out_str.count('decision 4') == 2)\n assert(out_str.count(': 2010') == 4)\n assert(out_str.count(': 2015') == 2)\n assert(out_str.count(': 2020') == 2)"
] | [
"0.6822283",
"0.66746986",
"0.6442038",
"0.6374539",
"0.62894946",
"0.62524617",
"0.6248636",
"0.6225992",
"0.62244165",
"0.61812615",
"0.6111694",
"0.60976326",
"0.6070672",
"0.6062699",
"0.6052158",
"0.603377",
"0.5992978",
"0.59575725",
"0.5954192",
"0.5941559",
"0.5938896",
"0.5921197",
"0.59147435",
"0.59147435",
"0.5914274",
"0.5891484",
"0.589024",
"0.5856305",
"0.5846842",
"0.58460677"
] | 0.741359 | 0 |
Test the negative sampling self adversarial loss function. | def test_negative_sampling_self_adversarial_loss(self):
loss_fct = NSSALoss(margin=1.0, adversarial_temperature=1.0)
self.assertIs(loss_fct._reduction_method, torch.mean)
pos_scores = torch.tensor([0.0, 0.0, -0.5, -0.5])
neg_scores = torch.tensor([0.0, 0.0, -1.0, -1.0])
# ≈ result of softmax
weights = torch.tensor([0.37, 0.37, 0.13, 0.13])
# neg_distances - margin = [-1., -1., 0., 0.]
# sigmoids ≈ [0.27, 0.27, 0.5, 0.5]
log_sigmoids = torch.tensor([-1.31, -1.31, -0.69, -0.69])
intermediate = weights * log_sigmoids
# sum over the softmax dim as weights sum up to 1
neg_loss = torch.sum(intermediate, dim=-1)
# pos_distances = [0., 0., 0.5, 0.5]
        # margin - pos_distances = [1., 1., 0.5, 0.5]
# ≈ result of sigmoid
# sigmoids ≈ [0.73, 0.73, 0.62, 0.62]
log_sigmoids = torch.tensor([-0.31, -0.31, -0.48, -0.48])
pos_loss = torch.mean(log_sigmoids)
        # expected_loss ≈ 0.77
expected_loss = (-pos_loss - neg_loss) / 2.0
loss = loss_fct(pos_scores, neg_scores, weights).item()
self.assertAlmostEqual(expected_loss, 0.77, delta=0.02)
self.assertAlmostEqual(expected_loss, loss, delta=0.02) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_non_zero_loss(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n polybeast.learn(*self.learn_args)\n\n self.assertNotEqual(self.stats[\"total_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"pg_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"baseline_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"entropy_loss\"], 0.0)",
"def test_false_negative_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0, 1.0 - self._penalty_predictions) * (self._penalty_labels > 0.0)\n * self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels > 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_negative_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)",
"def test_true_negative_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 - self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.true_negative_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)",
"def test_false_positive_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 + self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_positive_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)",
"def loss(A, Y):\n return A - Y",
"def test_negative_prediction_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 - self._penalty_predictions) * self._penalty_weights\n * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.negative_prediction_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)",
"def test_policy_loss(self):\n PolicyEstimator = self.notebook_locals[\"PolicyEstimator\"]\n student_policy_loss = self.notebook_locals[\"util_compute_policy_loss\"]\n obs = torch.ones(3, 2) * 2.0\n actions = torch.ones(3, 1) * 3.0\n returns = torch.arange(3, 6).float()\n policy_f = PolicyEstimator(\n num_hidden=1, hidden_dim=2, obs_dim=2, action_dim=1\n )\n # initialize to constant values\n for p in policy_f.parameters():\n torch.nn.init.ones_(p)\n loss_student = student_policy_loss(policy_f, obs, actions, returns)\n self.assertTrue(\n torch.abs(loss_student - 7.6758) < 0.1,\n \"The policy loss computation is incorrect\",\n )",
"def compute_loss(self):",
"def negative_gradient(self, y, y_pred, **kargs):",
"def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,\n K=10):\n\n # Sampling of indices is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n ### YOUR CODE HERE\n grad = np.zeros_like(outputVectors)\n gradPred = np.zeros_like(predicted)\n cost = 0.0\n probability = 0.0\n for sample_idx in indices:\n similarity = outputVectors[sample_idx].dot(predicted.T)\n probability = sigmoid(similarity) # squash to 0 ~ 1\n if sample_idx == target: # positive sample\n #p = sigmoid(outputVectors[sample_idx].dot(predicted.T))\n cost += -np.log(sigmoid(similarity))\n else: # negative sample\n #p = sigmoid(-outputVectors[sample_idx].dot(predicted.T))\n cost += -np.log(sigmoid(-similarity)) # deduction from reference 2.\n \n if sample_idx == target:\n grad[sample_idx, :] += (probability - 1) * predicted\n gradPred += (probability - 1) * outputVectors[sample_idx]\n else:\n grad[sample_idx, :] += probability * predicted\n gradPred += probability * outputVectors[sample_idx]\n '''\n V, D = outputVectors.shape\n one_hot_target = np.zeros(V)\n one_hot_target[target] = 1\n cost = 0\n gradPred = np.zeros_like(predicted)\n grad = np.zeros_like(outputVectors)\n \n for idx in indices:\n context_vector = outputVectors[idx] # embedding vector (1, D)\n cosine_similarity = normalizeRows(predicted).dot(normalizeRows(context_vector).T)\n print('neg sample, consine_similarity={0}'.format(cosine_similarity))\n binary_class = sigmoid(cosine_similarity)\n print('neg sample, binary_class={0}'.format(binary_class))\n \n if idx == target:\n cost += binary_class - 1\n else:\n cost += binary_class\n \n dlogits = sigmoid_grad(cosine_similarity)\n #gradPred += dlogits * normalizeRows(context_vector)\n #grad += np.outer(one_hot_target, dlogits * normalizeRows(predicted))\n gradPred += dlogits\n grad += np.outer(one_hot_target, dlogits)\n '''\n ### END YOUR CODE\n\n return cost, gradPred, grad",
"def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):\n return super(NegativeSampler, self).__call__(target_labels)",
"def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)",
"def test_dice_crossentropy_loss():\n assert dice_dice_crossentropy_loss() == expected_dice_crossentropy_loss",
"def compute_test_loss(self, x):\n temp_training = self.training\n self.training = False\n mean, logvar = self.encode(x)\n z = self.reparameterize(mean, logvar)\n x_logit = self.decode(z)\n\n cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x)\n logpx_z = -tf.math.reduce_sum(cross_ent, axis=[1, 2, 3]) # JAH removed 3D axis=[1, 2, 3,4]\n logpz = self.log_normal_pdf(z, 0.0, 0.0)\n logqz_x = self.log_normal_pdf(z, mean, logvar)\n\n\n kl_divergence = logqz_x - logpz\n neg_log_likelihood = -logpx_z\n\n #elbo = tf.math.reduce_mean(-self.kl_weight * kl_divergence - neg_log_likelihood) # shape=()\n neg_elbo = tf.math.reduce_mean(self.kl_weight * kl_divergence + neg_log_likelihood) # shape=()\n self.training = temp_training\n return neg_elbo",
"def derivative_loss(self, y, y_pred):\n return y_pred - y",
"def test_negative_values(self):\n rain = self.rain_prob_cube\n high_prob = self.high_prob_cube\n msg = \"Negative values of sleet probability have been calculated.\"\n with self.assertRaisesRegex(ValueError, msg):\n calculate_sleet_probability(rain, high_prob)",
"def ls_generator_loss(scores_fake):\r\n loss = torch.mean((scores_fake - 1) ** 2) / 2\r\n return loss",
"def loss(self, dataset=None, loss=None, training=None):\n # Recover the defaults, if missing\n dataset, loss = self._resolve_defaults(trainset=dataset, loss=loss)\n # Sample the train batch\n inputs, targets = dataset.sample(self._config)\n # Guess whether computation is for training, if necessary\n if training is None:\n training = torch.is_grad_enabled()\n # Forward pass\n return loss(self.run(inputs), targets, self._params)",
"def init_negative_sampler(self, unigram_power=3/4, strategy=\"word2vec\"):\n if strategy == \"word2vec\":\n counts = self.target['dst'].value_counts(normalize=True)\n freq = counts.values ** unigram_power\n self.freq = freq / sum(freq)\n self.dst_idxs = counts.index\n self.dst_neg_sampling = lambda size: np.random.choice(self.dst_idxs, size, replace=True, p=self.freq)\n elif strategy == \"uniform\":\n self.dst_neg_sampling = lambda size: np.random.choice(self.unique_dst, size, replace=True)",
"def compute_loss(self, obs, returns):",
"def loss_fn(self, pred: Tensor, true: Tensor) -> Tensor:\n pass",
"def ss_loss_(self, batch):\n raise NotImplementedError",
"def negSamplingCostAndGradient(predicted_vc, target, outputVectors_uk, dataset,\n K=10):\n\n # Sampling of indices is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n cost = 0.0\n sigmd_uoT_vc = sigmoid(np.dot(predicted_vc.reshape(-1), outputVectors_uk[target].T))\n cost += -np.log(sigmd_uoT_vc)\n\n gradPred_dJ_vc = np.zeros_like(predicted_vc)\n gradPred_dJ_vc += (sigmd_uoT_vc - 1) * outputVectors_uk[target]\n\n grad_dJ_uw = np.zeros_like(outputVectors_uk)\n grad_dJ_uw[target:target + 1] = (sigmd_uoT_vc - 1) * predicted_vc\n\n neg_samples = []\n for i in range(K):\n j = dataset.sampleTokenIdx()\n if j == target or (j in neg_samples):\n i -= 1 # if negative sample is same with target or already sampled, then resample.\n continue\n neg_samples.append(j)\n\n sigmd_ukT_vc = sigmoid(-np.dot(predicted_vc.reshape(-1), outputVectors_uk[j].T))\n cost += -np.log(sigmd_ukT_vc) # cost for negative sample\n\n grad_dJ_uw[j:j + 1] = (1 - sigmd_ukT_vc) * predicted_vc # gradient for negative sample\n gradPred_dJ_vc += (1 - sigmd_ukT_vc) * outputVectors_uk[j]\n\n return cost, gradPred_dJ_vc, grad_dJ_uw",
"def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,\n K=10):\n\n # Sampling of indices is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n ### YOUR CODE HERE\n # Similar to softmax, we find the target matrix\n # v_c structured as 1xD matrix\n # u_o assume to be 1xD matrix\n # u_k assume to be K x D matrix\n # we pull the data assuming that each row represent one vector\n v_c = predicted\n u_o = outputVectors[target]\n u_k = outputVectors[indices]\n\n\n # The intermediary matrix outputs\n # z_o, h_o: single scalar number\n # z_k, h_k: K x 1 vector, wich each number associated with a neg sample\n z_o = np.dot(u_o, v_c)\n h_o = sigmoid(z_o)\n z_k = np.dot(u_k, v_c)\n h_k = sigmoid( - z_k)\n\n J_1 = - np.log(h_o)\n J_2 = - np.sum( np.log(h_k) )\n cost = J_1+ J_2\n\n # Return the gradient for the prediction function\n # the prediction vector interacts with both the predicted vector\n # the negative sample vectors so below are both parts of the gradient\n # here we are trying to increase the prediction matrix to maximize\n # the similarity with the predicted vector\n # output is a 1 x D matrix\n grad_pred_o = - (1 - h_o)*u_o\n\n # the second part is tyring to decrease\n # similarity with the negative sample vectors\n # K x 1 multiply be input is a k x D matrix, we will need to sum all negative samples\n # along the rows. output is a 1 x D matrix\n # reshape h_k so that it can multiple\n grad_pred_k = np.dot(( 1 - h_k).T, u_k)\n # find the predicted matrix gradient\n # output is a 1 x D matrix\n gradPred = grad_pred_o + grad_pred_k\n\n\n # Return the gradient of the output vector\n # create a matrix the same shape as outputVector\n grad = np.zeros(outputVectors.shape)\n # first find the gradient wrt to the target output vector\n # here we want to increase the similarity between\n # the target output vector and the center vector\n # outputs is a 1 x D matrix\n grad_u_o = - (1-h_o)*v_c\n\n # print('***************grad_u_o************')\n # print(grad_u_o)\n # print(grad_u_o.shape)\n # replace the target row in output vector gradient\n grad[target, ] = grad_u_o\n # then find the gradient descent of all the u_k matrices\n # K x 1 matrix multiply by 1 x 3\n # K x D\n grad_uk = - np.outer((h_k - 1), v_c)\n # print('***************grad_uk************')\n # print(grad_uk)\n # for each token (row) replace gradient\n for k in xrange(u_k.shape[0]):\n index = indices[k]\n grad[index] += grad_uk[k]\n\n ### END YOUR CODE\n return cost, gradPred, grad",
"def loss(self, **kwargs):\n pass",
"def test_squared_loss_forward():\n from your_code import SquaredLoss\n X = np.array([[-1, 2, 1], [-3, 4, 1]])\n w = np.array([1, 2, 3])\n y = np.array([1, -1])\n\n loss = SquaredLoss(regularization=None)\n\n _true = 26.5\n _est = loss.forward(X, w, y)\n print(_est)",
"def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)",
"def negSamplingCostAndGradient(predicted, target, outputVectors, dataset, K=10):\n\t#sampling of indices\n\tindices = [target]\n\tindices.extend(getNegativeSamples(target, dataset, K))\n\n\tgrad = np.zeros(outputVectors.shape)\n\tgradPred = np.zeros(predicted.shape)\n\tcost = 0\n\t#a true pair\n\tz = sigmoid(np.dot(outputVectors[target], predicted))\n\n\tcost -= np.log(z)\n\tgrad[target] += predicted*(z-1.0)\n\tgradPred += outputVectors[target]*(z-1.0)\n\n\tfor k in range(K):\n\t\tsamp = indices[k+1]\n\t\tz = sigmoid(np.dot(outputVectors[samp], predicted))\t#real number\n\t\tcost -= np.log(1.0 - z)\n\t\tgrad[samp] += predicted*z \n\t\tgradPred += outputVectors[samp]*z \n\n\treturn cost, gradPred, grad",
"def loss(self, y_pred=None, y_true=None):\n n = y_pred.shape[0]\n ahat = transform(self.ahat)\n bhat = transform(self.bhat)\n return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \\\n - 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum())",
"def loss_(self, batch):\n raise NotImplementedError"
] | [
"0.6969207",
"0.69518435",
"0.6858083",
"0.6652001",
"0.6562485",
"0.6560385",
"0.6536775",
"0.6470992",
"0.6404801",
"0.63875115",
"0.6325799",
"0.6324553",
"0.63079697",
"0.6183252",
"0.61738074",
"0.6160125",
"0.615615",
"0.61495835",
"0.6144141",
"0.6114965",
"0.61058164",
"0.61029756",
"0.60741115",
"0.6046711",
"0.60331064",
"0.6032784",
"0.59894574",
"0.5966302",
"0.596287",
"0.59593874"
] | 0.7992889 | 0 |
Test the pipeline on RotatE with negative sampling self adversarial loss and nations. | def test_pipeline(self):
loss = NSSALoss
loss_kwargs = {"margin": 1.0, "adversarial_temperature": 1.0}
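# margin is the γ offset applied to the distances; adversarial_temperature is the
# α used to compute the softmax weights over the negative samples.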
pipeline_results = pipeline(
model="RotatE",
dataset="nations",
loss=loss,
loss_kwargs=loss_kwargs,
training_kwargs=dict(use_tqdm=False),
)
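# The loss class and its kwargs should have been forwarded to the instantiated model.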
self.assertIsInstance(pipeline_results, PipelineResult)
self.assertIsInstance(pipeline_results.model.loss, loss)
self.assertEqual(pipeline_results.model.loss.margin, 1.0)
self.assertEqual(pipeline_results.model.loss.inverse_softmax_temperature, 1.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test(self):\n # Load the trained models.\n self.train_ev_ea()\n self.restore_model(self.test_iters)\n self.encoder_v.eval()\n # Set data loader.\n data_loader = self.data_loader\n empty = torch.FloatTensor(1, 3,self.image_size,self.image_size).to(self.device) \n empty.fill_(1)\n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device)\n step = 0\n data_loader.test.reinitialize_index()\n with torch.no_grad():\n while True:\n try:\n x_real, wrong_images, attributes, _, label_org = data_loader.test.next_batch_test(self.batch_size,10)\n except:\n break\n x_real = x_real.to(self.device) \n label_org = label_org.to(self.device)\n attributes = attributes.to(self.device)\n \n \n ev_x = self.encoder_v(x_real)\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes, noise)\n \n out_A2B_results = [empty]\n out_A2B_results_a = [empty]\n\n for idx1 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx1:idx1+1])\n out_A2B_results_a.append(x_real[idx1:idx1+1])\n\n for idx2 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx2:idx2+1])\n out_A2B_results_a.append(x_real[idx2:idx2+1])\n \n for idx1 in range(label_org.size(0)):\n x_fake = self.decoder(self.encoder(x_real[idx2:idx2+1]), ev_x[idx1:idx1+1])\n out_A2B_results.append(x_fake)\n \n x_fake_a = self.decoder(self.encoder(x_real[idx2:idx2+1]), ea_a[idx1:idx1+1])\n out_A2B_results_a.append(x_fake_a)\n results_concat = torch.cat(out_A2B_results)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_v.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n results_concat = torch.cat(out_A2B_results_a)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_a.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n step += 1",
"def test(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, _ = self._get_smi_dl(phase=\"test\", shuffle=False)\n test_loader = tqdm(self.test_loader, desc='testing...')\n\n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.to(self.device)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].to(self.device)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1:\n # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank 
{rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n\n test_loss += batch_loss\n test_loader.set_description(f\"testing...loss={test_loss / epoch_test_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size # self.test_size\n\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n message = f\"{self.args.expt_name}\\n\"\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass",
"def test(self):\n img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)\n ## test flow ##\n\n self.save_results(img_gen, data_name='vis')\n if self.opt.save_input or self.opt.phase == 'val':\n self.save_results(self.input_P1, data_name='ref')\n self.save_results(self.input_P2, data_name='gt')\n result = torch.cat([self.input_P1, img_gen, self.input_P2], 3)\n self.save_results(result, data_name='all')",
"def test_non_zero_loss(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n polybeast.learn(*self.learn_args)\n\n self.assertNotEqual(self.stats[\"total_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"pg_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"baseline_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"entropy_loss\"], 0.0)",
"def test_detector1pipeline1(_bigdata):\n\n step = Detector1Pipeline()\n step.group_scale.skip = True\n step.dq_init.skip = True\n step.saturation.skip = True\n step.ipc.skip = True\n step.superbias.skip = True\n step.refpix.skip = True\n step.rscd.skip = True\n step.firstframe.skip = True\n step.lastframe.skip = True\n step.linearity.skip = True\n step.dark_current.skip = True\n step.persistence.skip = True\n step.jump.skip = True\n step.ramp_fit.skip = False\n\n step.gain_scale.skip = False\n step.gain_scale.save_results = True\n\n expfile = 'jw00001001001_01101_00001_MIRIMAGE'\n step.run(_bigdata+'/miri/test_sloperpipeline/' + expfile + '_uncal.fits')\n\n\n files = glob('*.fits')\n\n output_file = expfile + '_gain_scale.fits'\n assert output_file in files\n files.remove(output_file)\n\n output_file = expfile + '_gain_scaleints.fits'\n assert output_file in files\n files.remove(output_file)\n\n assert not len(files)",
"def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%",
"def generate_negative_samples(self, data, sampled_data, zeros=[], validation=False):\n negative_sampled_data = []\n negative_sampled_indices = []\n for sample in sampled_data:\n i = data['pos'].index(sample) ## index of a particular move in a demo\n all_num = 0\n for which, num in enumerate(data['leng_pos']):\n all_num += num\n if all_num > i:\n which_demo = which ## index of a demo the move with index i comes from\n break\n\n sum_neg_lengths = sum(data['leng_neg'][:which_demo])\n\n key = sum_neg_lengths-1 \n value = sum_neg_lengths + data['leng_neg'][which_demo]\n demo_negative_data = data['neg'][key : value]\n state, action = sample\n for demo_state, demo_action in demo_negative_data:\n if demo_state == state:\n negative_sampled_data.extend([(demo_state, demo_action)])\n demo_index = data['neg'].index((demo_state, demo_action))\n negative_sampled_indices.append(demo_index)\n\n if not validation:\n num_pos = sum(self.pipeline_y == 1)\n num_neg = len(negative_sampled_data)\n pos_sample = self.pipeline_X[:num_pos, :]\n neg_sample = self.pipeline_X[num_pos + negative_sampled_indices, :]\n y_vector = [1] * num_pos + [0] * num_neg\n ######################### Mouselab ad-hc #########################\n ########################## Removing 0's ##########################\n non_zero = [self.pipeline_X[i, :] for i in range(num_pos)\n if i not in zeros]\n pos_sample = vstack(non_zero) if non_zero != [] else self.pipeline_X[0,:]\n num_pos = pos_sample.shape[0]\n y_vector = [1] * num_pos + [0] * num_neg\n ##################################################################\n\n self.pipeline_X = vstack((pos_sample, neg_sample))\n self.pipeline_y = np.array(y_vector, dtype='uint8')\n \n return negative_sampled_data",
"def test_predictor():",
"def main():\n \"\"\"\n This is just for testing the functions\n \"\"\"\n\n x1 = np.array([1, 1, 1, 1, -1, -1, 1, 1, 1])\n x2 = np.array([1, -1, 1, 1, 1, 1, 1, -1, 1])\n x3 = np.array([-1, 1, -1, -1, 1, -1, -1, 1, -1])\n train_set = np.vstack((x1, x2))\n train_set = np.vstack((train_set, x3))\n\n\n params = {\n \"epochs\": 100,\n \"neurons\": len(x1),\n \"learn_method\": 'classic'\n }\n\n hop = hop_net.HopfieldNet(train_set, **params)\n hop.batch_train()\n show_trained(train_set)\n\n x4d = [1,1,1,1,1,1,1,1,1]\n x5d = [1,1,1,1,-1,-1,1,-1,-1]\n x45d = np.vstack((x4d, x5d))\n test_set = np.vstack((x45d, train_set))\n recalled_set = hop.recall(test_set)\n for i in range(test_set.shape[0]):\n show_tested(test_set[i], recalled_set[i])",
"def test(model, dataloader, params, args, val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += [mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += 
[mean_negative_detJ]\n\n t.update()\n\n # construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics",
"def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)",
"def test_TLearner(self):\n # TLearner test\n # Instantiate TLearner\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedTLearner(models=automl_model_reg())\n\n # Test constant and heterogeneous treatment effect, single and multi output y\n\n est.fit(Y, T, X=X)\n _ = est.effect(X)",
"def test_tpr_fwer_alex(self, syn_genomic_data, syn_labels, syn_labels_0based, syn_labels_cat, syn_fm, syn_idx, rep, syn_true_pvalues):\n\n window_length = 35\n best_params_montaez['n_snps'] = n_total_snps\n\n def combi_compute_pvalues(d, x, fm, l,filter_window_size,pf,ps,k):\n\n idx, pvalues, raw_weights = combi_method(d, x,fm, l,filter_window_size,pf,ps,k)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[idx] = pvalues\n raw_weights = postprocess_weights_without_avg(raw_weights, p_svm)\n\t\t\t\n # Map the raw weights to look like p-values between 0 and 1 (reverse order)\t\t\n # Figure out how 'wide' range is\n leftSpan = np.max(raw_weights) - np.min(raw_weights)\n\n # Convert the left range into a 0-1 range (float)\n valueScaled = (raw_weights - np.min(raw_weights)) / leftSpan\n\n # Reverse order\n raw_weights = 1 - valueScaled\t\n\t\t\t\n del d, l\n return pvalues_filled, raw_weights\n\n def challenger_compute_pvalues(d, x, l_0b, l, idx):\n is_only_zeros = False\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model(best_params_montaez)\n y_integers = np.argmax(l_0b[idx.train], axis=1)\n class_weights = class_weight.compute_class_weight('balanced', np.unique(y_integers), y_integers)\n d_class_weights = dict(enumerate(class_weights))\n model.fit(x=x[idx.train], y=l_0b[idx.train], validation_data=(x[idx.test], l_0b[idx.test]), epochs=best_params_montaez['epochs'],class_weight=d_class_weights, callbacks=[ReduceLROnPlateau(monitor='val_loss', factor=best_params_montaez['factor'], patience=best_params_montaez['patience'], mode='min'),])\n\n model = iutils.keras.graph.model_wo_softmax(model)\n analyzer = innvestigate.analyzer.LRPAlpha1Beta0(model)\n weights = analyzer.analyze(x).sum(0)\n\n if np.max(abs(weights)) < 0.005:\n fig, axes = plt.subplots(1)\n is_only_zeros = True\n axes.plot(np.absolute(weights).sum(axis=1))\n fig.savefig(os.path.join(IMG_DIR, 'test.png'))\n\n top_indices_sorted, _ = postprocess_weights(weights, top_k, window_length, p_svm, p_pnorm_filter)\n rawlrp_scores_now = postprocess_weights_without_avg(weights,p_svm)\n\t\t\t\t\n # Map the raw weights to look like p-values between 0 and 1 (reverse order)\t\t\n # Figure out how 'wide' range is\n leftSpan = np.max(rawlrp_scores_now) - np.min(rawlrp_scores_now)\n\n # Convert the left range into a 0-1 range (float)\n valueScaled = (rawlrp_scores_now - np.min(rawlrp_scores_now)) / leftSpan\n\n # Reverse order\n rawlrp_scores_now = 1 - valueScaled\t\t\t\t\t\n\t\t\t\t\n pvalues = chi_square(d[:, top_indices_sorted], l)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[top_indices_sorted] = pvalues\n del d, x, l\n\n return pvalues_filled, is_only_zeros, rawlrp_scores_now\n\n fm_2d = syn_fm(\"2d\")\n fm_3d = syn_fm(\"3d\")\n\n clf = LinearSVC(penalty='l2', loss='hinge', C=0.0022, dual=True, tol=1e-3, verbose=0, class_weight='balanced')\n\n bla = Parallel(n_jobs=-1, require='sharedmem')(delayed(combi_compute_pvalues)(clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30) for i in tqdm(range(rep)))\n raw_svmweights_per_run_combi = np.array(list(np.array(bla)[:, 1]))\n pvalues_per_run_combi = np.array(list(np.array(bla)[:, 0]))\n\n pvalues_per_run_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(chi_square)(syn_genomic_data[str(i)][:], syn_labels[str(i)]) for i in tqdm(range(rep))))\n\n # len(thresholds) * len(window_sizes) * 10020\n abl = Parallel(n_jobs=-1, require='sharedmem')(delayed(challenger_compute_pvalues)(syn_genomic_data[str(i)][:], 
fm_3d[str(i)][:], syn_labels_cat[str(i)], syn_labels[str(i)], syn_idx[str(i)]) for i in tqdm(range(rep)))\n\n # Collect results\n pvalues_per_run_dense = np.array(list(np.array(abl)[:, 0]))\n rawlrp_scores_per_run_dense = np.array(list(np.array(abl)[:, 2]))\n \n # INNvestigate bugfix\n zeros_index = np.array(list(np.array(abl)[:, 1])) \n pvalues_per_run_combi = pvalues_per_run_combi[np.logical_not(zeros_index)]\n raw_svmweights_per_run_combi = raw_svmweights_per_run_combi[np.logical_not(zeros_index)]\n pvalues_per_run_dense = pvalues_per_run_dense[np.logical_not(zeros_index)]\n rawlrp_scores_per_run_dense = rawlrp_scores_per_run_dense[np.logical_not(zeros_index)]\n pvalues_per_run_rpvt = pvalues_per_run_rpvt[np.logical_not(zeros_index)]\n true_pvalues = syn_true_pvalues[np.logical_not(zeros_index)]\n\n # COMBI\n res_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_combi, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n tpr_combi, _, fwer_combi, precision_combi = res_combi.T\n\n # SVM weights\n res_rawsvm = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(raw_svmweights_per_run_combi, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n tpr_rawsvm, _, fwer_rawsvm, precision_rawsvm = res_rawsvm.T\n\n # RPVT\n res_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_rpvt, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\t\t\n tpr_rpvt, _, fwer_rpvt, precision_rpvt = res_rpvt.T\n\n # Plot\n fig, axes = plt.subplots(1,2)\n\n fig.set_size_inches(15, 9)\n ax1, ax2 = axes\n\n ax1.set_ylim(0, 0.7)\n ax1.set_xlim(0, 0.3)\n ax1.set_ylabel('True positive rate', fontsize=14)\n ax1.set_xlabel('Family-wise error rate', fontsize=14)\n ax2.set_ylabel('Precision', fontsize=14)\n ax2.set_xlabel('True positive rate', fontsize=14)\n\n # CURVES must stop somewhere\n tpr_rpvt_new = tpr_rpvt[tpr_rpvt < 1]\n fwer_rpvt = fwer_rpvt[tpr_rpvt < 1]\n precision_rpvt = precision_rpvt[tpr_rpvt < 1]\n tpr_rpvt = tpr_rpvt_new\n\n tpr_combi_new = tpr_combi[tpr_combi < 1]\n fwer_combi = fwer_combi[tpr_combi < 1]\n precision_combi = precision_combi[tpr_combi < 1]\t\n tpr_combi = tpr_combi_new\n\n tpr_rawsvm_new = tpr_rawsvm[tpr_rawsvm < 1]\n fwer_rawsvm = fwer_rawsvm[tpr_rawsvm < 1]\n precision_rawsvm = precision_rawsvm[tpr_rawsvm < 1]\n tpr_rawsvm = tpr_rawsvm_new\n\t\t\n # RPVT\n ax1.plot(fwer_rpvt, tpr_rpvt, label='RPVT', color='lightsteelblue', linewidth=2)\n ax2.plot(tpr_rpvt, precision_rpvt, color='lightsteelblue', label='RPVT', linewidth=2)\n\n # COMBI \n ax1.plot(fwer_combi, tpr_combi, color='darkblue', label='COMBI', linewidth=2)\n ax2.plot(tpr_combi, precision_combi, color='darkblue', label='COMBI', linewidth=2)\n \n # raw SVM weights\n ax1.plot(fwer_rawsvm, tpr_rawsvm, linestyle='--', color='darkblue', label='SVM weights', linewidth=2)\n ax2.plot(tpr_rawsvm, precision_rawsvm, linestyle='--', color='darkblue', label='SVM weights', linewidth=2)\n\n # DeepCOMBI + LRP scores\n res_dense = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_dense, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n res_rawlrp_dense = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(rawlrp_scores_per_run_dense, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_dense, _, fwer_dense, precision_dense = res_dense.T\n tpr_rawlrp_dense, _, fwer_rawlrp_dense, precision_rawlrp_dense = 
res_rawlrp_dense.T\n\n assert fwer_combi.max() <= 1 and fwer_combi.min() >= 0\n tpr_dense_new = tpr_dense[tpr_dense < 1]\n fwer_dense = fwer_dense[tpr_dense < 1]\n precision_dense = precision_dense[tpr_dense < 1]\n tpr_dense = tpr_dense_new\n\n tpr_rawlrp_dense_new = tpr_rawlrp_dense[tpr_rawlrp_dense < 1]\n fwer_rawlrp_dense = fwer_rawlrp_dense[tpr_rawlrp_dense < 1]\n precision_rawlrp_dense = precision_rawlrp_dense[tpr_rawlrp_dense < 1]\n tpr_rawlrp_dense = tpr_rawlrp_dense_new\n\n # DeepCOMBI\n ax1.plot(fwer_dense, tpr_dense, color='fuchsia', label='DeepCOMBI', linewidth=3)\n ax2.plot(tpr_dense, precision_dense, color='fuchsia', label='DeepCOMBI', linewidth=3)\n\n # LRP scores\n ax1.plot(fwer_rawlrp_dense, tpr_rawlrp_dense, color='fuchsia', linestyle='--', label='LRP scores', linewidth=2)\n ax2.plot(tpr_rawlrp_dense, precision_rawlrp_dense, color='fuchsia', linestyle='--', label='LRP scores', linewidth=2)\n\n ax1.legend(fontsize=14,loc= 'lower right')\n ax2.legend(fontsize=14, loc= 'lower right')\n fig.savefig(os.path.join(IMG_DIR, 'tpr_fwer_montaez_final1000_NAR.png'), bbox_inches='tight', dpi=300)\n print(np.sum(zeros_index))\n pdb.set_trace()\n\n # CURVES must stop somewhere\n #combi_fp = combi_fp[combi_fp < 80]\n #combi_tp = combi_tp[:len(combi_fp)]\n #deepcombi_fp = deepcombi_fp[deepcombi_fp < 80]\n #deepcombi_tp = deepcombi_tp[:len(deepcombi_fp)]",
"def test_deterministic(self):\n add_noise = self.variant(exploration.add_ornstein_uhlenbeck_noise)\n # Test that noisy and noisless actions match for zero stddev\n noise_tm1 = np.zeros((self._num_actions,))\n for _ in range(10):\n action = np.random.normal(0., 1., self._num_actions)\n # Test output.\n self._rng_key, key = jax.random.split(self._rng_key)\n noisy_action = add_noise(key, action, noise_tm1, 1., 0.)\n noise_tm1 = action - noisy_action\n np.testing.assert_allclose(action, noisy_action)",
"def test():\n \n print('Loading best networks')\n env.guesser, agent.dqn = load_networks(i_episode='best')\n #env.guesser, agent.dqn = load_networks(i_episode='best', avg_reward = )\n\n # predict outcome on test data\n y_hat_test = np.zeros(len(env.y_test))\n y_hat_test_prob = np.zeros(len(env.y_test))\n \n print('Computing predictions of test data')\n n_test = len(env.X_test)\n for i in range(n_test):\n \n if i % 1000 == 0:\n print('{} / {}'.format(i, n_test))\n \n state = env.reset(mode='test', \n patient=i,\n train_guesser=False)\n mask = env.reset_mask()\n \n # run episode\n for t in range(FLAGS.episode_length):\n\n # select action from policy\n action = agent.get_action(state, eps=0, mask=mask)\n mask[action] = 0\n \n # take the action\n state, reward, done, guess = env.step(action, mode='test') \n \n if guess != -1:\n y_hat_test_prob[i] = torch.argmax(env.probs).item()\n \n if done:\n break\n y_hat_test[i] = guess\n \n C = confusion_matrix(env.y_test, y_hat_test)\n print('confusion matrix: ')\n print(C)\n\n acc = np.sum(np.diag(C)) / len(env.y_test)\n\n print('Test accuracy: ', np.round(acc, 3))",
"def Test(self, K_test=5, n_eg=None, K=5, \\\n C_parameters=np.array([0.01, 0.1, 1.0, 10., 100.]), \\\n norm_SF=True, norm_plane=True, tol_=0.0001, \\\n max_iter_=10000):\n\n # test acc array\n test_acc = np.zeros((self.__n_type, 3))\n n_test_eg = np.zeros((self.__n_type, 3))\n\n # Saving examples for real training later and shuffling examples\n for idx_type, type_ in enumerate(self.__types_unique):\n np.random.shuffle(self.training_R[idx_type])\n np.random.shuffle(self.training_NR[idx_type])\n training_R = list(self.training_R)\n training_NR = list(self.training_NR)\n\n\n for K_ in range(K_test):\n print('K = '+str(K_))\n\n # Creates a testing an training set from original training set\n testing_R = []\n testing_NR = []\n for idx_type, type_ in enumerate(self.__types_unique):\n\n # Finds rearrangements and non-rearrangements of given particle\n # type\n Rs_type = self.training_R[idx_type]\n NRs_type = self.training_NR[idx_type]\n\n # Finds a test set of rearrangements\n Rs_type_test = Rs_type[K_::K_test]\n NRs_type_test = NRs_type[K_::K_test]\n n_test = min(len(Rs_type_test),len(NRs_type_test))\n Rs_type_test = Rs_type_test[:n_test]\n NRs_type_test = NRs_type_test[:n_test]\n testing_R.append(Rs_type_test)\n testing_NR.append(NRs_type_test)\n\n # Deletes test set from training set\n Rs_type = np.delete(Rs_type, \\\n list(range(K_,len(Rs_type),K_test)),axis=0)\n NRs_type = np.delete(NRs_type, \\\n list(range(K_,len(NRs_type),K_test)), axis=0)\n self.training_R[idx_type] = Rs_type\n self.training_NR[idx_type] = NRs_type\n\n # Trains softness model using reduced training set\n self.Train(n_eg=n_eg, K=K, \\\n C_parameters=C_parameters, norm_SF=norm_SF, \\\n norm_plane=norm_plane, tol_=tol_, \\\n max_iter_=max_iter_)\n\n # Tests new plane against test set\n for idx_type, type_ in enumerate(self.__types_unique):\n\n # Finds number of soft rearrangements and hard \n # non-rearrangements\n soft_Rs = self.GetSoft(testing_R[idx_type])\n soft_NRs = self.GetSoft(testing_NR[idx_type])\n nRs = np.sum(soft_Rs > 0)\n nNRs = np.sum(soft_NRs < 0)\n\n # Calculates # of correctly classified particles\n test_acc[idx_type][0] += nRs+nNRs\n n_test_eg[idx_type][0] += len(soft_Rs)+len(soft_NRs)\n test_acc[idx_type][1] += nRs\n n_test_eg[idx_type][1] += len(soft_Rs)\n test_acc[idx_type][2] += nNRs\n n_test_eg[idx_type][2] += len(soft_NRs)\n\n # Returns self.training_R and self.training_NR to original state\n self.training_R = list(training_R)\n self.training_NR = list(training_NR)\n\n # Calculates test set accuracy\n test_acc = test_acc / n_test_eg\n\n return test_acc",
"def test_no_backg_subt():\n \n test_object = fa.read_in_envision(data_csv=HsHis6_PEX5C_vs_HsPEX5C, platemap_csv=Hs_His6_PEX5C_vs_HsPEX5C_platemap, data_type='plate', size=384)\n test_object.calculate_r_i(correct=True, plot_i=False, thr=80)",
"def test(self):\n self.training = False",
"def test_behaviour(self):\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n qml.PauliZ(0)\r\n qml.RX(1.0, wires=0)\r\n qml.CNOT(wires=[0, 2])\r\n qml.Rot(2.0, 3.0, 4.0, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n tape.trainable_params = {0, 2}\r\n shifts = [0.1, -0.2, 1.6]\r\n res = generate_shifted_tapes(tape, 1, shifts=shifts)\r\n\r\n assert len(res) == len(shifts)\r\n assert res[0].get_parameters(trainable_only=False) == [1.0, 2.0, 3.1, 4.0]\r\n assert res[1].get_parameters(trainable_only=False) == [1.0, 2.0, 2.8, 4.0]\r\n assert res[2].get_parameters(trainable_only=False) == [1.0, 2.0, 4.6, 4.0]",
"def ignore_test_pipeline_flow(self):\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse((\"点连接拿红包啦\"))\n\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse(\"点连接拿红包啦\")\n assert out1.get(\"classifylabel\").get(\"name\") == out2.get(\"classifylabel\").get(\"name\")\n\n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=True)",
"def test_deterministic(self):\n add_noise = self.variant(exploration.add_dirichlet_noise)\n\n # Test that noisy and noisless actions match for zero Dirichlet noise\n for _ in range(10):\n prior = np.random.normal(0., 1., (self._batch_size, self._num_actions))\n\n # Test output.\n self._rng_key, key = jax.random.split(self._rng_key)\n noisy_prior = add_noise(\n key, prior, dirichlet_alpha=0.3, dirichlet_fraction=0.)\n np.testing.assert_allclose(prior, noisy_prior)",
"def fit(self, plot_loss=True):\n adam = optim.Adam(self.learning_rate)\n self.svi = SVI(\n self.vae_model, \n self.vae_guide, \n adam, \n Trace_ELBO()\n )\n # encoder_nn = self.vae_encoder()\n # decoder_nn = self.vae_decoder()\n rng_key, rng_key_samp, rng_key_init = random.split(self.rng_key, 3)\n\n self.gp_predictive = Predictive(self.gp.sample, num_samples=self.batch_size)\n\n # initialise with a sample batch\n sample_batch = self.gp_predictive(rng_key=rng_key_samp, x=self.x)\n \n svi_state = self.svi.init(rng_key_init, sample_batch['y'])\n test_loss_list = []\n\n for i in range(self.num_epochs):\n rng_key, rng_key_train, rng_key_test = random.split(rng_key, 3)\n t_start = time.time()\n\n _, svi_state = self.epoch_train(rng_key_train, svi_state)\n test_loss = self.eval_test(rng_key_test, svi_state)\n test_loss_list += [test_loss]\n\n print(\n \"Epoch {}: loss = {} ({:.2f} s.)\".format(\n i, test_loss, time.time() - t_start\n )\n )\n \n if np.isnan(test_loss): break\n\n if plot_loss:\n plt.figure()\n plt.plot(np.arange(0, self.num_epochs, 1)[0:len(test_loss_list)], test_loss_list)\n plt.xlabel(\"epochs\")\n plt.ylabel(\"test error\")\n plt.savefig('src/test/plots/vae_lost.png')\n plt.show()\n plt.close()\n\n # return optimal parameters for decoder\n return self.svi.get_params(svi_state)[\"decoder$params\"]",
"def _test(self):\r\n lr, hr = self.sess.run(self.test_batch)\r\n res = self.sess.run(\r\n [self.merged,\r\n self.GAN.g_loss, self.GAN.mse_loss, self.GAN.g_ad_loss,\r\n self.GAN.d_loss, self.GAN.d_loss_real, self.GAN.d_loss_fake],\r\n feed_dict={\r\n self.GAN.g_images: lr,\r\n self.GAN.d_images: hr,\r\n self.GAN.is_training: False\r\n })\r\n\r\n return res",
"def test(self):\n self.model.eval()\n\n for step, sample in enumerate(self.test_loader):\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n self.test_losses.append(loss.item())\n\n if step % (max(8, len(self.test_loader)) // 8) == 0:\n out_img = torch.cat([x[0], torch.clamp(y_pred[0], 0, 1)], dim=2)\n self.sw.add_image(tag=f'sample_{step}', img_tensor=out_img, global_step=self.epoch)\n\n # log average loss on test set\n mean_test_loss = np.mean(self.test_losses)\n self.test_losses = []\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ patience: ', end='')\n self.sw.add_scalar(tag='test_loss', scalar_value=mean_test_loss, global_step=self.epoch)\n\n # save best model and update training patience\n if self.best_test_loss is None or mean_test_loss < self.best_test_loss:\n self.best_test_loss = mean_test_loss\n self.patience = conf.FX_PATIENCE\n torch.save(self.model.state_dict(), self.log_path / 'best.pth')\n else:\n self.patience = self.patience - 1\n print(f'{self.patience}/{conf.FX_PATIENCE}')\n\n if self.patience == 0:\n self.show_completion_msg()",
"def test_no_trainable_parameters(self, mocker):\r\n spy = mocker.spy(qml.gradients.finite_difference, \"generate_shifted_tapes\")\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tape.trainable_params = {}\r\n\r\n tapes, fn = finite_diff(tape)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.size == 0\r\n assert np.all(res == np.array([[]]))\r\n\r\n spy.assert_not_called()\r\n assert len(tapes) == 0",
"def test(self):\n with torch.no_grad():\n self.forward()\n self.compute_visuals()",
"def test_distributed(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, self.test_sampler = self._get_smi_dl(phase=\"test\", shuffle=False)\n self.test_sampler.set_epoch(0)\n if self.rank == 0:\n test_loader = tqdm(self.test_loader, desc='testing...')\n else:\n test_loader = self.test_loader\n \n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.cuda(non_blocking=True)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].cuda(non_blocking=True)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n test_batch_size = torch.tensor([test_batch_size]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(test_batch_size, dist.ReduceOp.SUM)\n test_batch_size = test_batch_size.item()\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n batch_correct_preds = torch.tensor([batch_correct_preds]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_correct_preds, dist.ReduceOp.SUM)\n batch_correct_preds = batch_correct_preds.item()\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1 and self.rank == 0: # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n 
rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n \n batch_loss = torch.tensor([batch_loss]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_loss, dist.ReduceOp.SUM)\n batch_loss = batch_loss.item()\n test_loss += batch_loss\n if self.rank == 0:\n test_loader.set_description(f\"testing...loss={test_loss / test_batch_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n \n dist.barrier()\n message = f\"{self.args.expt_name}\\n\"\n if self.rank == 0:\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass",
"def test_transformer2d_single_step_e2e(self):\n\n problem_object = allen_brain.Img2imgAllenBrainDim8to32()\n\n with TemporaryDirectory() as tmp_dir:\n\n mock_raw_data(tmp_dir, raw_dim=256, num_images=100)\n\n with TemporaryDirectory() as data_dir:\n\n problem_object.generate_data(data_dir, tmp_dir)\n\n input_xy_dim = problem_object.input_dim\n target_xy_dim = problem_object.output_dim\n num_channels = problem_object.num_channels\n\n hparams = image_transformer_2d.img2img_transformer2d_tiny()\n hparams.data_dir = data_dir\n\n p_hparams = problem_object.get_hparams(hparams)\n\n model = image_transformer_2d.Img2imgTransformer(\n hparams, tf.estimator.ModeKeys.TRAIN, p_hparams\n )\n\n @tfe.implicit_value_and_gradients\n def loss_fn(features):\n _, losses = model(features)\n return losses[\"training\"]\n\n batch_size = 1\n train_dataset = problem_object.dataset(Modes.TRAIN, data_dir)\n train_dataset = train_dataset.repeat(None).batch(batch_size)\n\n optimizer = tf.train.AdamOptimizer()\n\n example = tfe.Iterator(train_dataset).next()\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [batch_size,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n _, gv = loss_fn(example)\n optimizer.apply_gradients(gv)\n\n model.set_mode(Modes.EVAL)\n dataset = problem_object.dataset(Modes.EVAL, data_dir)\n\n example = tfe.Iterator(dataset).next()\n example[\"inputs\"] = tf.reshape(example[\"inputs\"],\n [1,\n input_xy_dim,\n input_xy_dim,\n num_channels])\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [1,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n\n predictions, _ = model(example)\n\n self.assertEqual(predictions.numpy().shape,\n (1,\n target_xy_dim,\n target_xy_dim,\n num_channels,\n 256))",
"def test_tpr_fwer(self, syn_genomic_data, syn_labels, syn_labels_0based, syn_labels_cat, syn_fm, syn_idx, rep, syn_true_pvalues):\n\n window_lengths = [35]\n\n best_params_montaez = {'epochs': 500, 'l1_reg': 0.001, 'l2_reg': 0.0001,'lr' :1e-05, 'dropout_rate':0.3, 'hidden_neurons':64, 'n_snps': n_total_snps}\n\n # n_permutations = 2\n\n def combi_compute_pvalues(d, x, fm, l,filter_window_size,pf,ps,k):\n #clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30\n idx, pvalues, _ = combi_method(d, x,fm, l,filter_window_size,pf,ps,k)\n\t\t\t#combi_method(classifier,data, fm, labels, filter_window_size, pnorm_filter, psvm, top_k)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[idx] = pvalues\n del d, l\n return pvalues_filled\n\n def challenger_compute_pvalues(d, x, l_0b, l, idx):\n is_only_zeros = False\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model(best_params_montaez)\n\n model.fit(x=x[idx.train], y=l_0b[idx.train],\n validation_data=(x[idx.test], l_0b[idx.test]),\n epochs=best_params_montaez['epochs'],\n callbacks=[\n ReduceLROnPlateau(monitor='val_loss',\n mode='min'),\n ])\n\n model = iutils.keras.graph.model_wo_softmax(model)\n analyzer = innvestigate.analyzer.LRPAlpha2Beta1(model)\n weights = analyzer.analyze(x).sum(0)\n\n if np.max(abs(weights)) < 0.005:\n fig, axes = plt.subplots(1)\n is_only_zeros = True\n axes.plot(np.absolute(weights).sum(axis=1))\n fig.savefig(os.path.join(IMG_DIR, 'test.png'))\n\n pvalues_list = np.zeros((len(window_lengths), weights.shape[0]))\n for i, filter_size in enumerate(window_lengths):\n top_indices_sorted, _ = postprocess_weights(\n weights, top_k, filter_size, p_svm, p_pnorm_filter)\n pvalues = chi_square(d[:, top_indices_sorted], l)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[top_indices_sorted] = pvalues\n pvalues_list[i] = pvalues_filled\n del d, x, l\n\n return pvalues_list, is_only_zeros\n\n fm_2d = syn_fm(\"2d\")\n fm_3d = syn_fm(\"3d\")\n clf = LinearSVC(penalty='l2', loss='hinge', C=1.0000e-05, dual=True, tol=1e-3, verbose=0)\n\n pvalues_per_run_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(\n combi_compute_pvalues)(clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30) for i in tqdm(range(rep))))\n\n pvalues_per_run_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(\n chi_square)(syn_genomic_data[str(i)][:], syn_labels[str(i)]) for i in tqdm(range(rep))))\n\n # len(thresholds) * len(window_sizes) * 10020\n a = Parallel(n_jobs=-1, require='sharedmem')(delayed(\n challenger_compute_pvalues)(syn_genomic_data[str(i)][:], fm_3d[str(i)][:], syn_labels_cat[str(i)], syn_labels[str(i)], syn_idx[str(i)]) for i in tqdm(range(rep)))\n\n # INNvestigate bugfix\n zeros_index = np.array(list(np.array(a)[:, 1]))\n pvalues_per_run_dense = np.array(list(np.array(a)[:, 0]))\n\n pvalues_per_run_combi = pvalues_per_run_combi[np.logical_not(zeros_index)]\n pvalues_per_run_dense = pvalues_per_run_dense[np.logical_not(zeros_index)]\n pvalues_per_run_rpvt = pvalues_per_run_rpvt[np.logical_not(zeros_index)]\n true_pvalues = syn_true_pvalues[np.logical_not(zeros_index)]\n\n # COMBI\n res_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_combi, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n tpr_combi, _, fwer_combi, precision_combi = res_combi.T\n\n\n # T_star - WARNING TAKES FOREVER\n tpr_permuted = 0\n fwer_permuted = 0\n precision_permuted = 0\n\n 
\"\"\"\n for i in range(rep):\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model_2(best_params_montaez_2)\n t_star = permuted_deepcombi_method(model, h5py_data[str(i)][:], fm_3d[str(i)][:], labels[str(i)], labels_cat[str(i)], n_permutations, alpha_sig_toy, filter_window_size, top_k, mode='all' )\n ground_truth = np.zeros((1,n_total_snps),dtype=bool)\n ground_truth[:,5000:5020] = True\n tpr, _, fwer, precision = compute_metrics(pvalues_per_run_rpvt[i], ground_truth, t_star) \n tpr_permuted += tpr\n fwer_permuted += fwer\n precision_permuted += precision\n tpr_permuted/=rep\n fwer_permuted/=rep\n precision_permuted/=rep\n \"\"\"\n\n # RPVT\n\n res_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(\n pvalues_per_run_rpvt, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_rpvt, _, fwer_rpvt, precision_rpvt = res_rpvt.T\n\n # Plot\n fig, axes = plt.subplots(2)\n fig.set_size_inches(18.5, 10.5)\n ax1, ax2 = axes\n\n ax1.set_ylim(0, 0.45)\n ax1.set_xlim(0, 0.1)\n\n ax1.set_ylabel('TPR')\n ax1.set_xlabel('FWER')\n ax1.plot(fwer_combi, tpr_combi, '-o',\n label='Combi')\n ax1.plot(fwer_rpvt, tpr_rpvt, '-o',\n label='RPVT')\n #ax1.plot(fwer_permuted, tpr_permuted, '-x',\n # label='COMBI & permuted threshold - ttbr={}'.format(ttbr))\n\n ax2.set_ylabel('Precision')\n ax2.set_xlabel('TPR')\n ax2.plot(tpr_combi, precision_combi, '-o',\n label='Combi')\n ax2.plot(tpr_rpvt, precision_rpvt, '-o',\n label='RPVT')\n #ax2.plot(tpr_permuted, precision_permuted, '-x',\n # label='COMBI & permuted threshold - ttbr={}'.format(ttbr))\n\n # Save results\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-tpr-{}'.format(ttbr)), tpr_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-fwer-{}'.format(ttbr)), fwer_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-precision-{}'.format(ttbr)), precision_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-tpr-pt{}'.format(ttbr)), tpr_permuted)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-fwer-pt{}'.format(ttbr)), fwer_permuted)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-precision-pt{}'.format(ttbr)), precision_permuted)\n\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-tpr-{}'.format(ttbr)), tpr_rpvt)\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-fwer-{}'.format(ttbr)), fwer_rpvt)\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-precision-{}'.format(ttbr)), precision_rpvt)\n\n # CHALLENGER\n for i, window in enumerate(window_lengths):\n pvalues_challenger = pvalues_per_run_dense[:, i]\n\n res_dense = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(\n pvalues_challenger, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_dense, _, fwer_dense, precision_dense = res_dense.T\n np.save(os.path.join(NUMPY_ARRAYS, 'tpr-{}-{}'.format(window, ttbr)), tpr_dense)\n np.save(os.path.join(NUMPY_ARRAYS, 'fwer-{}-{}'.format(window, ttbr)), fwer_dense)\n np.save(os.path.join(NUMPY_ARRAYS, 'precision-{}-{}'.format(window, ttbr)), precision_dense)\n assert fwer_combi.max() <= 1 and fwer_combi.min() >= 0\n ax1.plot(fwer_dense, tpr_dense, '-x', label='DeepCOMBI')\n ax2.plot(tpr_dense, precision_dense, '-x', label='DeepCOMBI')\n\n ax1.legend()\n ax2.legend()\n fig.savefig(\n os.path.join(IMG_DIR, 'tpr_fwer_montaez_combi_newsettings.png'.format(zeros_index.sum())),\n dpi=300)",
"def test(self):\n\t\ttest_rewards = []\n\t\tobs = self.env.reset()\n\t\tdone_test = False\n\t\tfor t in range(self.test_cycles):\n\t\t\t# to render or not to render\n\t\t\tif self.render_test:\n\t\t\t\tenv.render()\n\n\t\t\tcycle_rewards = 0\n\t\t\twhile not done_test:\n\t\t\t\tfeed_dict_test = {self.va_input: obs}\n\t\t\t\taction_test = self.sess.run(self.va_out, feed_dict = feed_dict_test)\n\t\t\t\taction_test = self.sess.run(tf.argmax(action_test))\n\t\t\t\tobs_test, r_test, done_test,_ = env.step(action_test)\n\t\t\t\tcycle_rewards += r_test\n\n\t\t\ttest_rewards.append(cycle_rewards)\n\n\t\treturn test_rewards"
] | [
"0.65215564",
"0.6213105",
"0.6165407",
"0.613751",
"0.6076847",
"0.5993722",
"0.59619707",
"0.5953103",
"0.594722",
"0.59443706",
"0.59269637",
"0.59047973",
"0.5888924",
"0.586419",
"0.58521694",
"0.58508056",
"0.5788468",
"0.57867897",
"0.57517517",
"0.57516396",
"0.5741047",
"0.5730047",
"0.5723718",
"0.57094395",
"0.5700751",
"0.5687346",
"0.567767",
"0.56641716",
"0.56459177",
"0.56381935"
] | 0.7003118 | 0 |
Assert that the refund request event works. | async def test_refund_request(app, session, stan_server, event_loop, client_id, events_stan, future):
# Call back for the subscription
from account_mailer.worker import cb_subscription_handler
# vars
invoice_id = '1'
events_subject = 'test_subject'
events_queue = 'test_queue'
events_durable_name = 'test_durable'
# register the handler to test it
await subscribe_to_queue(events_stan,
events_subject,
events_queue,
events_durable_name,
cb_subscription_handler)
# add an event to queue
mail_details = {
'identifier': 'NR 123456789',
'orderNumber': '1',
'transactionDateTime': '2020-12-12 14:10:20',
'transactionAmount': 50.00,
'transactionId': 'REG1234'
}
await helper_add_ref_req_to_queue(events_stan, events_subject, invoice_id=invoice_id, mail_details=mail_details)
    assert True  # If no errors, we assume the test passed.
# Test drawdown refund
mail_details = {
'identifier': 'NR 123456789',
'orderNumber': '1',
'transactionDateTime': '2020-12-12 14:10:20',
'transactionAmount': 50.00,
'transactionId': 'REG1234',
        'refundDate': '2000-01-01',
'bcolAccount': '12345',
'bcolUser': '009900'
}
await helper_add_ref_req_to_queue(events_stan, events_subject, invoice_id=invoice_id, mail_details=mail_details,
pay_method='drawdown') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_refund_request(app, session, stan_server, event_loop, client_id, events_stan, future):\n # Call back for the subscription\n from account_mailer.worker import cb_subscription_handler\n\n # vars\n invoice_id = '1'\n\n events_subject = 'test_subject'\n events_queue = 'test_queue'\n events_durable_name = 'test_durable'\n\n # register the handler to test it\n await subscribe_to_queue(events_stan,\n events_subject,\n events_queue,\n events_durable_name,\n cb_subscription_handler)\n\n # add an event to queue\n mail_details = {\n 'identifier': 'NR 123456789',\n 'orderNumber': '1',\n 'transactionDateTime': '2020-12-12 14:10:20',\n 'transactionAmount': 50.00,\n 'transactionId': 'REG1234'\n }\n await helper_add_ref_req_to_queue(events_stan, events_subject, invoice_id=invoice_id, mail_details=mail_details)\n\n assert True # If no errors, we assumed test passed.",
"def test_handle_notify_request_success(\n bambora_provider_base_config,\n order: Order,\n):\n order.order_number = \"abc123\"\n order.status = OrderStatus.PAID\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n refund = OrderRefundFactory(\n order=order, refund_id=\"1234567\", amount=order.total_price\n )\n\n rf = RequestFactory()\n request = rf.get(\"/payments/notify_refund/\", notify_success_params)\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n\n assert refund.status == OrderRefundStatus.PENDING\n\n returned = payment_provider.handle_notify_refund_request()\n\n refund = OrderRefund.objects.get(refund_id=notify_success_params.get(\"REFUND_ID\"))\n order = refund.order\n\n assert refund.status == OrderRefundStatus.ACCEPTED\n assert order.status == OrderStatus.REFUNDED\n assert order.lease.status == LeaseStatus.TERMINATED\n\n assert isinstance(returned, HttpResponse)\n assert returned.status_code == 204",
"def test_initiate_refund_another_pending_refund(\n bambora_provider_base_config: dict, order: Order\n):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n OrderRefundFactory(order=order, status=OrderRefundStatus.PENDING)\n\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() - relativedelta(hours=1)\n )\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n\n products = [\n {\n \"id\": \"123123123\",\n \"product_id\": 1123,\n \"title\": order.product.name,\n \"count\": 1,\n \"pretax_price\": 100,\n \"tax\": 24,\n \"price\": 100,\n \"type\": 1,\n }\n ]\n\n assert OrderRefund.objects.count() == 1\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with mock.patch(\n \"payments.providers.bambora_payform.requests.post\",\n side_effect=mocked_refund_response_create,\n ), mock.patch(\n \"payments.providers.bambora_payform.BamboraPayformProvider.get_payment_details\",\n side_effect=mocked_refund_payment_details(products=products),\n ), pytest.raises(\n ValidationError\n ) as exception:\n payment_provider.initiate_refund(order)\n\n assert \"Cannot refund an order that has another pending refund\" in str(exception)\n assert OrderRefund.objects.count() == 1",
"def test_initiate_refund_refunded_amount_does_not_match(\n bambora_provider_base_config: dict, order: Order\n):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() - relativedelta(hours=1)\n )\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n place_price = order.total_price + 10\n\n products = [\n {\n \"id\": \"123123123\",\n \"product_id\": 1123,\n \"title\": order.product.name,\n \"count\": 1,\n \"pretax_price\": price_as_fractional_int(\n convert_aftertax_to_pretax(place_price, order.product.tax_percentage)\n ),\n \"tax\": int(order.product.tax_percentage),\n \"price\": price_as_fractional_int(place_price),\n \"type\": 1,\n }\n ]\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with mock.patch(\n \"payments.providers.bambora_payform.requests.post\",\n side_effect=mocked_refund_response_create,\n ), mock.patch(\n \"payments.providers.bambora_payform.BamboraPayformProvider.get_payment_details\",\n side_effect=mocked_refund_payment_details(products=products),\n ), pytest.raises(\n RefundPriceError\n ) as exception:\n payment_provider.initiate_refund(order)\n\n assert (\n f\"The amount to be refunded ({currency_format(place_price)}) \"\n f\"does not match the amount paid ({currency_format(order.total_price)})\"\n ) in str(exception)\n assert not OrderRefund.objects.exists()",
"def test_handle_initiate_refund_error_validation(order, bambora_provider_base_config):\n r = {\"result\": 1, \"type\": \"e-payment\", \"errors\": [\"Invalid auth code\"]}\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n order.status = OrderStatus.PAID\n order.customer_email = \"[email protected]\"\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n request = RequestFactory().request()\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n\n with mock.patch(\n \"payments.providers.bambora_payform.requests.post\",\n side_effect=[MockResponse(data=r)],\n ):\n with pytest.raises(PayloadValidationError):\n payment_provider.initiate_refund(order)",
"def test_handle_notify_request_payment_failed(bambora_provider_base_config, order):\n order.order_number = \"abc123\"\n order.status = OrderStatus.PAID\n order.save()\n refund = OrderRefundFactory(\n order=order, refund_id=\"1234567\", amount=order.total_price\n )\n\n params = {\n \"AUTHCODE\": \"8CF2D0EA9947D09B707E3C2953EF3014F1AD12D2BB0DCDBAC3ABD4601B50462B\",\n \"RETURN_CODE\": \"1\",\n \"REFUND_ID\": \"1234567\",\n }\n\n rf = RequestFactory()\n request = rf.get(\"/payments/notify_refund/\", params)\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n\n assert refund.status == OrderRefundStatus.PENDING\n lease_status = refund.order.lease.status\n\n returned = payment_provider.handle_notify_refund_request()\n\n refund = OrderRefund.objects.get(refund_id=params.get(\"REFUND_ID\"))\n order = refund.order\n\n assert refund.status == OrderRefundStatus.REJECTED\n # The order status shouldn't change\n assert order.status == OrderStatus.PAID\n assert order.lease.status == lease_status\n\n assert isinstance(returned, HttpResponse)\n assert returned.status_code == 204",
"def test_initiate_refund_invalid_lease_status(\n bambora_provider_base_config: dict, order: Order, lease_status\n):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.lease.status = lease_status\n order.lease.save()\n order.save()\n\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() - relativedelta(hours=1)\n )\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with pytest.raises(ValidationError) as exception:\n payment_provider.initiate_refund(order)\n\n assert \"Cannot refund an order that is not paid\" in str(exception)",
"def test_initiate_refund_success(bambora_provider_base_config: dict, order: Order):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() - relativedelta(hours=1)\n )\n valid_token = OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n if hasattr(order.product, \"price_for_tier\"):\n place_price = order.product.price_for_tier(order.lease.berth.pier.price_tier)\n area = order.lease.berth.pier.harbor\n else:\n # Winter products are priced per m2\n place_price = rounded(\n order.product.price_value\n * order.lease.place.place_type.width\n * order.lease.place.place_type.length,\n )\n area = order.lease.place.winter_storage_section.area\n\n products = [\n {\n \"id\": get_talpa_product_id(order.product.id, area, False),\n \"product_id\": 1123,\n \"title\": order.product.name,\n \"count\": 1,\n \"pretax_price\": price_as_fractional_int(\n convert_aftertax_to_pretax(place_price, order.product.tax_percentage)\n ),\n \"tax\": int(order.product.tax_percentage),\n \"price\": str(price_as_fractional_int(place_price)),\n \"type\": 1,\n }\n ]\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with mock.patch(\n \"payments.providers.bambora_payform.requests.post\",\n side_effect=mocked_refund_response_create,\n ) as mock_call, mock.patch(\n \"payments.providers.bambora_payform.BamboraPayformProvider.get_payment_details\",\n side_effect=mocked_refund_payment_details(products=products),\n ):\n refund = payment_provider.initiate_refund(order)\n\n assert refund.refund_id == \"123456\"\n assert refund.order == order\n assert refund.status == OrderRefundStatus.PENDING\n assert refund.amount == order.total_price\n\n args = mock_call.call_args.kwargs.get(\"json\")\n assert (\n args.get(\"order_number\")\n == f\"{order.order_number}-{valid_token.created_at.timestamp()}\"\n )",
"def test_refund_cert_callback_before_expiration_email(self):\r\n course = CourseFactory.create(org='refund_before_expiration', number='test', run='course', display_name='one')\r\n course_key = course.id\r\n many_days = datetime.timedelta(days=60)\r\n\r\n course_mode = CourseMode(course_id=course_key,\r\n mode_slug=\"verified\",\r\n mode_display_name=\"verified cert\",\r\n min_price=self.cost,\r\n expiration_datetime=datetime.datetime.now(pytz.utc) + many_days)\r\n course_mode.save()\r\n\r\n CourseEnrollment.enroll(self.user, course_key, 'verified')\r\n cart = Order.get_cart_for_user(user=self.user)\r\n CertificateItem.add_to_order(cart, course_key, self.cost, 'verified')\r\n cart.purchase()\r\n\r\n mail.outbox = []\r\n with patch('shoppingcart.models.log.error') as mock_error_logger:\r\n CourseEnrollment.unenroll(self.user, course_key)\r\n self.assertFalse(mock_error_logger.called)\r\n self.assertEquals(len(mail.outbox), 1)\r\n self.assertEquals('[Refund] User-Requested Refund', mail.outbox[0].subject)\r\n self.assertEquals(settings.PAYMENT_SUPPORT_EMAIL, mail.outbox[0].from_email)\r\n self.assertIn('has requested a refund on Order', mail.outbox[0].body)",
"def test_refund_with_applied_refund(self):\n # Invoice 700.00\n debit_jobs(\n [(self.job, A(680), Entry.WORK_DEBIT), (self.job2, A(20), Entry.WORK_DEBIT)]\n )\n\n # Payment of 700.00 is incorrectly applied to first job\n credit_jobs([(self.job, A(700), A(0), A(0))], D(700))\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(-20),\n promised=A(0),\n debited=A(\n 680\n ), # invoice debit (680) + refund debit (0) = total debited (680)\n invoiced=A(\n 680\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-700), # payment credit (-700) + refund debit (0) = paid (-700)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n )\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(20),\n promised=A(0),\n debited=A(20), # invoice debit (20) + refund debit (0) = total debited (20)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(0), # payment credit (0) + refund debit (0) = paid (0)\n credited=A(0), # payment credit (0) + adjustment (0) = total credited (0)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n switch_to_job=self.job2,\n )\n\n # Refund 20.00 from first job and apply to second job\n refund_jobs([(self.job, A(20), A(0)), (self.job2, A(0), A(20))])\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 700\n ), # invoice debit (680) + refund debit (20) = total debited (700)\n invoiced=A(\n 680\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-680), # payment credit (-700) + refund debit (20) = paid (-680)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n )\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 20\n ), # invoice debit (20) + refund debit (20) = total debited (70)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(-20), # payment credit (-20) + refund debit (0) = paid (-20)\n credited=A(\n -20\n ), # payment credit (-20) + adjustment (0) = total credited (-20)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n switch_to_job=self.job2,\n )",
"def handle_notify_refund_request(self) -> None:\n raise NotImplementedError",
"def refund_payment(self, **kwargs):",
"def refund_payment(self, **kwargs):",
"def test_initiate_refund_token_still_valid(\n bambora_provider_base_config: dict, order: Order\n):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() + relativedelta(hours=1)\n )\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with pytest.raises(ValidationError) as exception:\n payment_provider.initiate_refund(order)\n\n assert \"Cannot refund an order that has not been settled yet (active token)\" in str(\n exception\n )",
"def test_refund_with_bank_refund(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(650), A(0), A(0))], D(650))\n refund_jobs([(self.job, A(50), A(0))])\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 650\n ), # invoice debit (600) + refund debit (50) = total debited (650)\n invoiced=A(600), # invoice debit (600) = total invoiced (600)\n paid=A(-600), # payment credit (-650) + refund (50) = paid (-600)\n credited=A(\n -650\n ), # payment credit (-650) + adjustment (0) = credited (-650)\n partial=A(600).net_amount,\n tax=A(600).tax_amount,\n )",
"def test_initiate_refund_invalid_order_status(\n bambora_provider_base_config: dict, order: Order, order_status\n):\n request = RequestFactory().request()\n order.status = order_status\n order.save()\n\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() - relativedelta(hours=1)\n )\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with pytest.raises(ValidationError) as exception:\n payment_provider.initiate_refund(order)\n\n assert \"Cannot refund an order that is not paid\" in str(exception)",
"def test_initiate_refund_no_order_email(\n bambora_provider_base_config: dict, order: Order\n):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.email = None\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n\n if hasattr(order.product, \"price_for_tier\"):\n place_price = order.product.price_for_tier(order.lease.berth.pier.price_tier)\n area = order.lease.berth.pier.harbor\n else:\n # Winter products are priced per m2\n place_price = rounded(\n order.product.price_value\n * order.lease.place.place_type.width\n * order.lease.place.place_type.length,\n )\n area = order.lease.place.winter_storage_section.area\n\n products = [\n {\n \"id\": get_talpa_product_id(order.product.id, area, False),\n \"product_id\": 1123,\n \"title\": order.product.name,\n \"count\": 1,\n \"pretax_price\": price_as_fractional_int(\n convert_aftertax_to_pretax(place_price, order.product.tax_percentage)\n ),\n \"tax\": int(order.product.tax_percentage),\n \"price\": price_as_fractional_int(place_price),\n \"type\": 1,\n }\n ]\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with mock.patch(\n \"payments.providers.bambora_payform.requests.post\",\n side_effect=mocked_refund_response_create,\n ), mock.patch(\n \"payments.providers.bambora_payform.BamboraPayformProvider.get_payment_details\",\n side_effect=mocked_refund_payment_details(products=products),\n ):\n refund = payment_provider.initiate_refund(order)\n\n assert refund.refund_id == \"123456\"",
"def test_refund_with_applied_refund_and_bank_refund(self):\n # Invoice 600.00\n debit_jobs(\n [(self.job, A(580), Entry.WORK_DEBIT), (self.job2, A(20), Entry.WORK_DEBIT)]\n )\n\n # Payment of 700.00 is incorrectly applied to first job\n credit_jobs([(self.job, A(700), A(0), A(0))], D(700))\n\n one = A(n=\"-0.01\", t=\"0.01\")\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(\"-120\") + one,\n promised=A(-100) + one,\n debited=A(\n 580\n ), # invoice debit (680) + refund debit (0) = total debited (680)\n invoiced=A(\n 580\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-700), # payment credit (-700) + refund debit (0) = paid (-700)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n )\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(20),\n promised=A(-100) + one,\n debited=A(20), # invoice debit (20) + refund debit (0) = total debited (20)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(0), # payment credit (0) + refund debit (0) = paid (0)\n credited=A(0), # payment credit (0) + adjustment (0) = total credited (0)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n switch_to_job=self.job2,\n )\n\n # Refund 20.00 from first job and apply to second job\n refund_jobs([(self.job, A(120) - one, A(0)), (self.job2, A(0), A(20))])\n\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 700\n ), # invoice debit (680) + refund debit (20) = total debited (700)\n invoiced=A(\n 580\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-580), # payment credit (-700) + refund debit (20) = paid (-680)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n partial=A(600).net_amount,\n tax=A(600).tax_amount,\n )\n\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 20\n ), # invoice debit (20) + refund debit (20) = total debited (70)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(-20), # payment credit (-20) + refund debit (0) = paid (-20)\n credited=A(\n -20\n ), # payment credit (-20) + adjustment (0) = total credited (-20)\n partial=A(600).net_amount,\n tax=A(600).tax_amount,\n switch_to_job=self.job2,\n )",
"def test_pending_request_accepted_with_event(self):\n data = {\n 'slug': '2016-06-30-test-event',\n 'host': Organization.objects.first().pk,\n 'tags': [1],\n 'invoice_status': 'unknown',\n }\n rv = self.client.post(\n reverse('eventrequest_accept_event', args=[self.er1.pk]),\n data)\n self.assertEqual(rv.status_code, 302)\n request = Event.objects.get(slug='2016-06-30-test-event').eventrequest\n self.assertEqual(request, self.er1)",
"def test_refund_with_applied_refund_and_bank_refund_and_recognized_revenue(self):\n\n # Invoice 600.00\n debit_jobs(\n [\n (self.job, A(580), Entry.WORK_DEBIT),\n (self.job2, A(20), Entry.WORK_DEBIT),\n ],\n recognize_revenue=True,\n )\n\n # Payment of 700.00 is incorrectly applied to first job\n credit_jobs([(self.job, A(700), A(0), A(0))], D(700))\n\n one = A(n=\"-0.01\", t=\"0.01\")\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(\"-120\") + one,\n debited=A(\n 580\n ), # invoice debit (680) + refund debit (0) = total debited (680)\n invoiced=A(\n 580\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-700), # payment credit (-700) + refund debit (0) = paid (-700)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n income=A(600).net_amount,\n tax=A(600).tax_amount,\n )\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(20),\n debited=A(20), # invoice debit (20) + refund debit (0) = total debited (20)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(0), # payment credit (0) + refund debit (0) = paid (0)\n credited=A(0), # payment credit (0) + adjustment (0) = total credited (0)\n income=A(600).net_amount,\n tax=A(600).tax_amount,\n switch_to_job=self.job2,\n )\n\n # Refund 20.00 from first job and apply to second job\n refund_jobs([(self.job, A(120) - one, A(0)), (self.job2, A(0), A(20))])\n\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n debited=A(\n 700\n ), # invoice debit (680) + refund debit (20) = total debited (700)\n invoiced=A(\n 580\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-580), # payment credit (-700) + refund debit (20) = paid (-680)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n income=A(600).net_amount,\n tax=A(600).tax_amount,\n )\n\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n debited=A(\n 20\n ), # invoice debit (20) + refund debit (20) = total debited (70)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(-20), # payment credit (-20) + refund debit (0) = paid (-20)\n credited=A(\n -20\n ), # payment credit (-20) + adjustment (0) = total credited (-20)\n income=A(600).net_amount,\n tax=A(600).tax_amount,\n switch_to_job=self.job2,\n )",
"def test_check_refund_authcode_success(bambora_payment_provider):\n rf = RequestFactory()\n request = rf.get(\"/payments/notify_refund/\", notify_success_params)\n assert bambora_payment_provider.check_new_refund_authcode(request)",
"def test_cancel_shipment(self):\n pass",
"def test_resuableitem_cancel_changerequest_submitter(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # delete any notifications prior to the next step\n Notification.objects.all().delete()\n self.assertEqual(Notification.objects.count(), 0)\n\n # user 1 now cancels the change request\n self.client.force_authenticate(user=self.user_1)\n data2 = {'cancel': 'true'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n updated_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # change request has been removed\n self.assertEqual(updated_reusableitem.change_request, None)\n self.assertEqual(updated_reusableitem.change_request_votes_no.count(), 0)\n self.assertEqual(updated_reusableitem.change_request_votes_yes.count(), 0)\n\n # history has been updated\n history_entry = updated_reusableitem.history[1]\n\n self.assertNotEqual(history_entry, None)\n self.assertEqual(history_entry['change_request_resolution'], 'cancelled')\n self.assertNotEqual(history_entry['changed_request_resolved_at'], None)\n self.assertEqual(history_entry['changed_request_submitted_by_id'], self.user_1.id.__str__())\n self.assertEqual(history_entry['number_of_users'], 2)\n self.assertEqual(history_entry['change_request_votes_yes_count'], 1)\n self.assertEqual(history_entry['change_request_votes_no_count'], 0)\n\n self.assertEqual(history_entry['change_request']['name'], data1['name'])\n self.assertEqual(history_entry['change_request']['definition'], data1['definition'])\n self.assertEqual(history_entry['change_request']['link'], data1['link'])\n\n # user 2 should get a notification of the change request cancellation\n self.assertEqual(Notification.objects.count(), 1)\n\n notification2 = Notification.objects.get(created_by=self.user_2)\n self.assertEqual(notification2.context, 'reusableItem')\n self.assertEqual(notification2.event, 'changeRequestCancelled')\n self.assertEqual(notification2.reusableItem, updated_reusableitem)",
"def test_cancel_shipment_old(self):\n pass",
"def RefundOrder(capture_id, refund_amount=0, currency_code=\"EUR\"):\n\tsale = Sale.find(capture_id)\n\n\trefund = sale.refund({\n\t\"amount\": {\n\t\t\"total\": refund_amount,\n\t\t\"currency\": currency_code\n\t}\n\t})\n\n\tif refund.success():\n\t\tprint(\"Refund[%s] Success\" % (refund.id))\n\t\treturn True # Return True if the Refund was successfull\n\telse:\n\t\tprint(refund.error)\n\t\treturn False # Return False if the Refund failed",
"def test_api_user_resend_confirmation_post(self):\n pass",
"def check_new_refund_authcode(self, request: HttpRequest):\n return self.check_authcode_params(\n request,\n (\n \"RETURN_CODE\",\n \"REFUND_ID\",\n ),\n )",
"def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)",
"def test_manually_confirm(self):\n data = {}\n response = self.client.post(self.url, data)\n self.assertRedirects(response, reverse('reminders_dashboard'))\n\n reminder = reminders.SentNotification.objects.get(pk=self.unconfirmed.pk)\n self.assertEqual(reminder.status, 'manual')\n self.assertEqual(reminder.date_confirmed.date(), datetime.date.today())",
"def test_manually_confirm(self):\n data = {}\n response = self.client.post(self.url, data)\n self.assertRedirects(response, reverse('reminders_dashboard'))\n\n reminder = reminders.SentNotification.objects.get(pk=self.unconfirmed.pk)\n self.assertEqual(reminder.status, 'manual')\n self.assertEqual(reminder.date_confirmed.date(), datetime.date.today())"
] | [
"0.7186693",
"0.661709",
"0.6575661",
"0.6517174",
"0.64606166",
"0.6411402",
"0.63819194",
"0.634786",
"0.6343884",
"0.62957674",
"0.62583494",
"0.6201193",
"0.6201193",
"0.61731523",
"0.61508113",
"0.61438215",
"0.6119798",
"0.611924",
"0.6054042",
"0.60197675",
"0.5991011",
"0.594607",
"0.593688",
"0.5889151",
"0.5862161",
"0.5847105",
"0.5822218",
"0.57607645",
"0.5708775",
"0.5708775"
] | 0.7257154 | 0 |
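The refund-request test earlier in this record only exercises the subscriber side; the event itself is pushed onto the STAN subject by the helper_add_ref_req_to_queue fixture, whose body is not shown. A minimal sketch of what such a publisher could look like follows. The event-type string and envelope layout are assumptions for illustration, not the project's actual payload schema, and stan_client is assumed to be an already-connected stan.aio client such as events_stan above.

import json


async def publish_refund_request(stan_client, subject, invoice_id, mail_details,
                                 pay_method='direct_pay'):
    """Hypothetical helper: publish a refund-request event to a STAN subject."""
    payload = {
        'type': 'bc.registry.payment.refundRequest',  # assumed event-type string
        'data': {
            'identifier': invoice_id,
            'paymentMethod': pay_method,
            'mailDetails': mail_details,
        },
    }
    # stan.aio clients expose an async publish(subject, payload: bytes) method
    await stan_client.publish(subject, json.dumps(payload).encode('utf-8'))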
Tell if a process is running. The proc object is cached so it doesn't need to be looked up every time. | def is_process_running(name):
if not hasattr(is_process_running, "proc"):
is_process_running.proc = None # it doesn't exist yet, so init it
if is_process_running.proc:
if is_process_running.proc.is_running():
return True
else:
is_process_running.proc = None
return False
else:
for p in psutil.process_iter():
if p.name() == name:
is_process_running.proc = p
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False",
"def is_running(self):\r\n if self._gone:\r\n return False\r\n try:\r\n # Checking if pid is alive is not enough as the pid might\r\n # have been reused by another process.\r\n # pid + creation time, on the other hand, is supposed to\r\n # identify a process univocally.\r\n return self.create_time == \\\r\n self.get_process_create_time()\r\n except NoSuchProcess:\r\n self._gone = True\r\n return False",
"def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))",
"def is_process_running(pid):\n return os.path.exists(\"/proc/%s\" % pid)",
"def is_running(self):\n if self._process:\n return self._process.poll() is None\n else:\n return False",
"def running(self):\n return bool(self.proc and self._running())",
"def _is_running(self):\n try:\n # Process is not killed, os.kill(pid, 0) does nothing but raise if process does not\n # exist.\n os.kill(self.pid, 0)\n except ProcessLookupError:\n return False\n else:\n return True",
"def is_running(self):\n if self._process and self._process.poll() is None:\n return True\n return False",
"def _proc_is_alive(self):\n if self._proc is None:\n return False\n\n return self._proc.poll() is None",
"def _is_alive(self, pid):\n process = next(x for x in self._processes if x.pid == pid)\n return process.is_alive()",
"def is_process_running(self, name):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Checking to see if the process {} is \"\n \"running\".format(log_tag, name))\n return self.get_pids(name) is not None",
"def isRunning(self):\n if not self.running:\n return False\n elif self.process.poll() == 0 or self.process.returncode >= 0:\n return False\n else:\n return True",
"def running(self):\n return self.sub_process and self.sub_process.is_alive()",
"def is_running(program):\n return program in get_running()",
"def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True",
"def is_proc_alive(pid):\n return os.path.isdir(\"/proc/%i\" % pid)",
"def is_running(self):\n qstat = self._grep_qstat('running')\n if qstat:\n return True\n return False",
"def proc_is_alive(pid):\n handle = windll.kernel32.OpenProcess(\n win32con.SYNCHRONIZE | win32con.PROCESS_QUERY_INFORMATION, 0, pid)\n if handle == 0:\n return False\n\n # If the process exited recently, a pid may still exist for the handle.\n # So, check if we can get the exit code.\n exit_code = DWORD()\n rval = windll.kernel32.GetExitCodeProcess(handle, byref(exit_code))\n windll.kernel32.CloseHandle(handle)\n if rval == 0: # GetExitCodeProcess failure\n raise WinError()\n return exit_code.value == win32con.STILL_ACTIVE",
"def _isSubProcessRunning(self): \n # Check if child process has terminated. Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False",
"def is_started(self):\n return bool(self._processes)",
"def is_running(self):\n\t\treturn self in _running",
"def is_running(self):\n\t\treturn self._running",
"def check_pid_is_running(self):\n if not os.path.exists(self.__file):\n return True\n\n with open(self.__file, \"r\") as f:\n try:\n pid = int(f.read().strip())\n except Exception:\n return True\n\n try:\n os.kill(pid, 0)\n except OSError:\n return True\n\n return self.check_process_cmd_line(pid)",
"def _is_running(self):\n return self._run_state.is_running()",
"def alive(self):\n return self._proc is not None and self._proc.poll() is None",
"def alive(self):\n return self._process.is_alive()",
"def already_running(pid_file):\n\n\tassert ltrace(TRACE_PROCESS, u'| already_running({0}) ↣ {1}',\n\t\t(ST_PATH, pid_file), (ST_ATTR, os.path.exists(pid_file) and\n\t\t\tos.path.exists('/proc/' + open(pid_file, 'r').read().strip())))\n\n\treturn os.path.exists(pid_file) and \\\n\t\tos.path.exists('/proc/' + open(pid_file, 'r').read().strip())",
"def proc_is_alive(pid):\n try:\n os.kill(pid, 0)\n except OSError as e:\n if e.errno == errno.EPERM:\n return True\n if e.errno == errno.ESRCH:\n return False\n raise # something else went wrong\n else:\n return True",
"def is_running(self):\n self.__condition.acquire()\n result = self.__is_running\n self.__condition.release()\n return result",
"def is_running(self):\n return self._is_running"
] | [
"0.7895711",
"0.7814375",
"0.7768379",
"0.77449167",
"0.7729471",
"0.7717063",
"0.77083045",
"0.77081454",
"0.7664434",
"0.7623029",
"0.75754946",
"0.75263417",
"0.742384",
"0.7372659",
"0.7331689",
"0.72935724",
"0.7286316",
"0.72350216",
"0.7228423",
"0.7215268",
"0.71597713",
"0.7153505",
"0.71393657",
"0.7132692",
"0.71094424",
"0.7089416",
"0.7069667",
"0.7059253",
"0.7038913",
"0.70368916"
] | 0.8501636 | 0 |
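The function-attribute cache in is_process_running above mainly pays off when the check is repeated in a loop, since a cache hit avoids re-scanning the whole process table with psutil. A small usage sketch (the process name and polling interval are arbitrary, and psutil must be importable for is_process_running itself):

import time


def wait_for_exit(name, poll_seconds=1.0):
    """Block until a process named `name` has started and then exited."""
    while not is_process_running(name):   # cache empty: scans the process table
        time.sleep(poll_seconds)
    while is_process_running(name):       # cache hit: only checks the stored proc
        time.sleep(poll_seconds)

# wait_for_exit('notepad.exe')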
Processing for the endpoint /content which handles the content form | def content_index():
form = ContentForm()
# POST - Following block executes to handle submission of ContentForm
if form.validate_on_submit():
# content type choice is extracted from the form
choice = form.content_type.data # content type choice by user
choices = dict(ContentForm.SELECT_CHOICES) # all possible choices
# user input from the content form is stored in the following:
owner_email = form.owner_email.data
content_name = form.content_name.data.title()
content_type = choices.get(choice)
valid_months = form.valid_months.data
# validation - if owner does not exist, reload page with error msg
owner_obj = Owner.find_by_email(owner_email)
if not owner_obj:
flash(f'Owner with the email {owner_email} does not exist!',
'danger')
return redirect(url_for('content'))
# validation - if content already exists, reload page with error msg
if Content.find_by_name(content_name):
flash(f'Content with the name {content_name} already exists!',
'danger')
return redirect(url_for('content'))
# new content is saved and page is reloaded with success msg
new_content = Content(content_name=content_name,
content_type=content_type,
updated_at=date.today(),
valid_months=valid_months,
owner_id=owner_obj.id)
# saving content errors handled
try:
new_content.save_content()
except HTTPException:
return "Server cannot save the content at this time", 500
flash(f'{owner_obj.owner_name} has been assigned a new '
f'{content_type.lower()}!', 'success')
return redirect(url_for('content'))
    # GET all existing contents and render them in the view
contents = Content.get_all_content()
    # for each content, __valid_days is checked for negative values
# view is rendered with all contents and owner data
return render_template('content.html',
title='Content Form',
form=form,
contents=contents) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getContent(self) -> object:\n ...",
"def get_content(self) -> Content:\n pass",
"def content(request):\n if not request.method in ('GET', 'POST'):\n raise Http404(\"Invalid method: {}\".format(request.method))\n\n if not authuser_is_user(request.user):\n raise Http404(\"User does not have a profile, contact admin\")\n\n section = getattr(request, request.method).get('section', None)\n if section not in SECTION_CHOICES:\n raise Http404(\"valid section required\")\n content = latest_content(request.user, section)\n\n if request.method == 'POST':\n form = PartialContentForm(request.POST, instance=content)\n if form.is_valid():\n logging.info(\"creating record with data: {}\".format(form.save(commit=False)))\n form.save()\n\n form = PartialContentForm(instance=content)\n return render(request, 'editor/content-form.html', {'form': form})",
"def handleContentComplete():",
"def read_content(self):\n pass",
"def submit(self, content):\n pass",
"def handle(self, content):\n # Check the API request\n serializer = ApiRequestSerializer(data=content)\n if not serializer.is_valid():\n return self.consumer.send_to_client(\n {\"topic\": \"api\", \"type\": \"error\", \"message\": \"invalid-request\"}\n )\n\n # Make request\n method = serializer.validated_data[\"method\"]\n url = serializer.validated_data[\"url\"]\n payload = serializer.validated_data.get(\"payload\", None)\n logger.info(\"API {}:{}:{}\".format(method, url, payload))\n\n response = getattr(self.client, method)(url, data=payload, follow=True)\n\n # Return to client\n # API response\n to_client = {\n \"topic\": \"api\",\n \"type\": \"response\",\n \"status_code\": response.status_code,\n }\n if response.get(\"Content-Type\") == \"application/json\":\n to_client[\"content\"] = response.json()\n else:\n to_client[\"content\"] = content\n\n # Original request params\n to_client.update({\"method\": method, \"url\": url})\n if payload is not None:\n to_client[\"payload\"] = payload\n\n self.consumer.send_to_client(to_client)",
"def receive(self, content, **kwargs):\n print(content)\n dispatch(content, self.path[1:])",
"def select_content():\n return render_template('content_management/select_content.html')",
"def content(self):\n raise NotImplementedError()",
"def process(self, request):\n pass",
"def handle_content_edit(content_id):\n\n # instance of ContentForm is available to both GET and POST requests\n form = ContentForm()\n\n # content will be None if it cannot be found\n content = Content.find_by_id(content_id)\n\n # POST - for handling the edit content form\n if form.validate_on_submit():\n\n # validation - owner email must exist\n owner_email = form.owner_email.data\n owner_obj = Owner.find_by_email(owner_email)\n if not owner_obj:\n flash(f'Owner with the email {owner_email} does not exist!',\n 'danger')\n # if owner not exist, edit page is reloaded with same content id\n return redirect(url_for('content_edit', content_id=content.id))\n\n # content type choice is extracted from the form\n choice = form.content_type.data # user choice\n choices = dict(ContentForm.SELECT_CHOICES) # all possible choices\n\n # content is updated with form values and saved to the database\n content.content_name = form.content_name.data.title()\n content.content_type = choices.get(choice)\n content.valid_months = form.valid_months.data\n content.updated_at = date.today() # today's date becomes last updated\n content.owner_id = owner_obj.id\n\n # saving content errors handled\n try:\n content.save_content()\n except HTTPException:\n return \"Server cannot update the content at this time\", 500\n\n # user is redirected to the main content page with success msg\n flash(f'{content.content_name} has been updated!', 'success')\n return redirect(url_for('content'))\n\n # GET - display the form\n # form is pre-populated with existing content data\n form.content_name.data = content.content_name\n form.owner_email.data = Owner.find_by_id(content.owner_id).owner_email\n form.valid_months.data = content.valid_months\n form.submit.data = \"Update Content\"\n\n # content type stored in this content is looked up against all types\n # each choice is a tuple pair - (stored choice, displayed choice)\n for form_type in ContentForm.SELECT_CHOICES:\n # choice becomes default value on form if it matches the stored value\n if form_type[1] == content.content_type:\n form.content_type.data = form_type[0]\n\n return render_template('content_edit.html',\n content_name=content.content_name,\n form=form)",
"def contentHandler(self):\n return self.__contentHandler",
"def get_content(self):\n url = self.build_url()\n try:\n self.content_page = requests.get(url)\n if not(self.content_page.status_code == requests.codes.ok):\n self.content_page.raise_for_status()\n except requests.exceptions.RequestException as ex:\n logging.info('A requests exception has ocurred: ' + str(ex))\n logging.error(traceback.format_exc())\n sys.exit(0)",
"def index_html(self,REQUEST): \n return self.posting_html(self,REQUEST)",
"def get_content(self):\n return self.content",
"def update_content(self):\n raise NotImplementedError",
"def fetch_content(self, content, content_type='text/html',\n response=settings.HTTP_OK):\n self.send_response(response)\n self.send_header('Content-type', content_type)\n self.end_headers()\n self.wfile.write(content)\n self.wfile.close()",
"def preview_handler(self, _, __):\r\n template = self.system.render_template('lti_form.html', self.get_context())\r\n return Response(template, content_type='text/html')",
"def content(self):\n return(self.__response.content)",
"def content():\n try:\n url = request.args.get('url')\n if not url:\n raise Exception('Expected url parameter')\n return render(cached_content(url=url), template='content.jinja2')\n except Exception, e:\n traceback.print_exc()\n return render({'url': request.url, 'error': str(e)},\n template='error.jinja2')",
"def fetchContent(self):\n print 'fetching page by its path: '+ self.path\n uri = '%s?path=%s' % (self.client.MakeContentFeedUri(), self.path)\n # get the content feed\n feed = self.client.GetContentFeed(uri=uri)\n # take out the content\n self.entry = feed.get_webpages()[0]",
"def data(self, content):\n self._data_handlers(self, content)",
"def content(self, content_type=None):\r\n params = base.get_params(None, locals())\r\n return self._get('content', params)",
"def set_content(self, content):\n self.data['content'] = content",
"def render_POST(self, request):",
"def parse(self, content):\n pass",
"def handleContentChunk(data):",
"def content_creator():\n with temporary_url_for_logger(app) as logger:\n with logger:\n content = page.render_html(\n solution=solution,\n static_url=static_url,\n lesson_url=lesson_url,\n subpage_url=subpage_url,\n vars=variables\n )\n absolute_urls = [url_for(logged[0], **logged[1]) for logged in logger.logged_calls]\n\n relative_urls = [get_relative_url(request.path, x) for x in absolute_urls]\n\n return {\"content\": content, \"urls\": relative_urls}",
"def render(self):\n _ = self.request.getText\n form = self.request.form\n \n if form.has_key('cancel'):\n # User canceled\n return self.page.send_page(self.request)\n\n try:\n if not self.allowed():\n raise ActionError(_('You are not allowed to edit this page.'))\n elif not self.page.exists():\n raise ActionError(_('This page is already deleted or was never created!'))\n \n self.package()\n except ActionError, e:\n return self.page.send_page(self.request, msg=e.args[0])"
] | [
"0.68734026",
"0.65634793",
"0.6559953",
"0.65355384",
"0.64136237",
"0.6392098",
"0.62899566",
"0.6207141",
"0.61433744",
"0.6108671",
"0.6087179",
"0.60673165",
"0.606479",
"0.60591775",
"0.60088414",
"0.60057867",
"0.59966636",
"0.5988125",
"0.5975663",
"0.5925643",
"0.59035987",
"0.5899268",
"0.58820015",
"0.58696747",
"0.58512646",
"0.58180517",
"0.58154476",
"0.58136964",
"0.5807573",
"0.57911503"
] | 0.7183763 | 0 |
Processing for the endpoint /content/edit?content_id= | def handle_content_edit(content_id):
# instance of ContentForm is available to both GET and POST requests
form = ContentForm()
# content will be None if it cannot be found
content = Content.find_by_id(content_id)
# POST - for handling the edit content form
if form.validate_on_submit():
# validation - owner email must exist
owner_email = form.owner_email.data
owner_obj = Owner.find_by_email(owner_email)
if not owner_obj:
flash(f'Owner with the email {owner_email} does not exist!',
'danger')
# if owner not exist, edit page is reloaded with same content id
return redirect(url_for('content_edit', content_id=content.id))
# content type choice is extracted from the form
choice = form.content_type.data # user choice
choices = dict(ContentForm.SELECT_CHOICES) # all possible choices
# content is updated with form values and saved to the database
content.content_name = form.content_name.data.title()
content.content_type = choices.get(choice)
content.valid_months = form.valid_months.data
content.updated_at = date.today() # today's date becomes last updated
content.owner_id = owner_obj.id
# saving content errors handled
try:
content.save_content()
except HTTPException:
return "Server cannot update the content at this time", 500
# user is redirected to the main content page with success msg
flash(f'{content.content_name} has been updated!', 'success')
return redirect(url_for('content'))
# GET - display the form
# form is pre-populated with existing content data
form.content_name.data = content.content_name
form.owner_email.data = Owner.find_by_id(content.owner_id).owner_email
form.valid_months.data = content.valid_months
form.submit.data = "Update Content"
# content type stored in this content is looked up against all types
# each choice is a tuple pair - (stored choice, displayed choice)
for form_type in ContentForm.SELECT_CHOICES:
# choice becomes default value on form if it matches the stored value
if form_type[1] == content.content_type:
form.content_type.data = form_type[0]
return render_template('content_edit.html',
content_name=content.content_name,
form=form) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit(id):\n r = requests.get(API_ROUTE + '/' + str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return render_template('editor.html', article=r.json())",
"def edit(self, new_content: object, reason: str = \"\") -> None:\n raise NotImplementedError",
"def save_edit(request, post_id):\n if request.method == \"PUT\":\n data = json.loads(request.body)\n user = request.user\n post = Post.objects.get(id=post_id)\n content = data.get(\"content\", \"\")\n # Check to make sure user attempting edit is author\n if user == post.author:\n post.content = content\n post.save()\n return JsonResponse({\"content\": post.content})\n else:\n return JsonResponse({\"message\": \"Not authorized to edit\"})",
"def edit(self, **kwargs):\n ...",
"def edit(self, new_content: str) -> None:\n\n # YOUR CODE HERE\n self.content = new_content",
"def action_edit(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=action_id)\n if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n raise PermissionDenied()\n # if request.method == 'POST':\n form = ActionForm(request.POST, instance=action)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n # else:\n # form = ActionForm(instance=action)\n # return TemplateResponse(\n # request,\n # 'mus/action_edit.html',\n # dict(\n # form=form,\n # edit=True\n # )\n # )\n\n # return JsonResponse(status=200, data={\"data\": form.instance.title, \"edit\": True})",
"def updateContent(content, **kwargs):",
"def edit(self):\n\n pass",
"def edit_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n content = get_content_or_400(request)\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n collection.update_one({\"_id\": task[\"_id\"]}, {\"$set\": {\"content\": content}})\n\n response = jsonify()\n response.status_code = 200\n return response",
"def edit_entry(self, id, body=None, link=None, **args):\n args.update(id=id)\n if body: args.update(body=body)\n if link: args.update(link=link)\n return self.fetch(\"/entry\", post_args=args)",
"def update(request):\n paste = Paste.get(request.matchdict['idContent'])\n\n password = _buildPassword(paste.username, paste.created, request.POST['password'])\n\n if password == paste.password:\n paste.title = request.POST['title']\n paste.content = request.POST['content']\n\n paste.save()\n\n request.session.flash(u\"Updated\") # TODO translatoion\n\n return HTTPFound(request.route_path('oneContent', idContent=paste._id))\n\n request.session.flash(u\"Wrong password\") # TODO translatoion\n\n return HTTPFound(request.route_path('edit', idContent=paste._id))",
"def edit(request, pageName):\n \n if request.method == \"POST\":\n form = EditForm(request.POST)\n \n if form.is_valid(): \n content = form.cleaned_data[\"content\"]\n title = form.cleaned_data[\"title\"]\n \n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"encyclopedia:visit_entry\", args=(title, )))\n \n else:\n\n form = EditForm({'title': pageName, 'content': util.get_entry(pageName) })\n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm(),\n \"pageName\": pageName\n })\n \n \n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm({'title': pageName, 'content': util.get_entry(pageName) }),\n \"pageName\": pageName\n })",
"def edit():",
"def edit_entry(id):\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n db.execute('update entries set title = ?, ingredients = ?, \\\n steps = ?, tags = ?, url = ? where id = ?',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url'], request.form['id']])\n db.commit()\n flash('Entry ' + id + ' has been modified.', 'success')\n return view_entry(str(id))\n else:\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries where id = ? order by id desc',\n [id.strip()])\n entries = cur.fetchall()\n return render_template('edit_entry.html', entries=entries)",
"def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))",
"def edit(cm_response, **data):\n return cm_response",
"def document_edit_view(document_id):\n\n doc = Document.query.filter(Document.id == document_id).first_or_404()\n return render_template('admin/documents/edit.html', document=doc, path='/admin/documents')",
"def edit_attachments(request, id):\n content = get_object_or_404(MathContent, id=id)\n result = check_edit_attachments_permissions(request.user, content)\n if not result.allowed:\n return 404 # Could be 403 in some cases.\n\n if request.method == 'POST':\n if 'delete_attachment_id' in request.POST:\n try:\n attachment = Attachment.objects.get(\n id=request.POST['delete_attachment_id']\n )\n except Attachment.DoesNotExist:\n return 403 # Always 403.\n if attachment.content_id != content.id:\n return 403 # Always 403.\n\n attachment.content = content # Reuse.\n attachment.delete_file()\n attachment.delete()\n content.html = None\n content.save()\n\n # Redirect to avoid form resubmission.\n return (content.get_edit_attachments_url(),)\n\n attachment, form = check_and_save_attachment(request, content)\n if attachment is not None:\n # Redirect to avoid form resubmission.\n return (content.get_edit_attachments_url(),)\n else:\n form = AttachmentForm()\n\n assert (\n result.task is not None\n ), \"assuming for now only Task MathContents can have attachments\"\n data = {\n 'content': content,\n 'form': form,\n 'task': result.task,\n }\n data.update(get_task_folder_data(result.task, request.user))\n\n return data",
"def edit(request):\n if 'image_id' not in request.GET:\n return HttpResponseRedirect('/imgmanip')\n image_id = request.GET['image_id']\n image = get_object_or_404(Image, pk=image_id)\n return render(request, 'imgmanip/edit.html', {'image': image, 'image_id': image_id})",
"def test_edit_view(self):\n target_url = url_for('content.edit_content')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)",
"def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)",
"def link_edit_callback(self):\n pass",
"def edit_article(article_id):\n \n if 'username' in session: \n article = mongo.db.articles.find_one_or_404(\n {'_id': ObjectId(article_id)})\n form=BlogForm()\n form.title.data = article['title']\n form.content.data = article['content']\n return render_template('pages/editarticle.html',\n form=form, \n article=article, \n legend='Edit your Blog Article'\n )",
"def content_index():\n form = ContentForm()\n\n # POST - Following block executes to handle submission of ContentForm\n if form.validate_on_submit():\n\n # content type choice is extracted from the form\n choice = form.content_type.data # content type choice by user\n choices = dict(ContentForm.SELECT_CHOICES) # all possible choices\n\n # user input from the content form is stored in the following:\n owner_email = form.owner_email.data\n content_name = form.content_name.data.title()\n content_type = choices.get(choice)\n valid_months = form.valid_months.data\n\n # validation - if owner does not exist, reload page with error msg\n owner_obj = Owner.find_by_email(owner_email)\n if not owner_obj:\n flash(f'Owner with the email {owner_email} does not exist!',\n 'danger')\n return redirect(url_for('content'))\n\n # validation - if content already exists, reload page with error msg\n if Content.find_by_name(content_name):\n flash(f'Content with the name {content_name} already exists!',\n 'danger')\n return redirect(url_for('content'))\n\n # new content is saved and page is reloaded with success msg\n new_content = Content(content_name=content_name,\n content_type=content_type,\n updated_at=date.today(),\n valid_months=valid_months,\n owner_id=owner_obj.id)\n\n # saving content errors handled\n try:\n new_content.save_content()\n except HTTPException:\n return \"Server cannot save the content at this time\", 500\n\n flash(f'{owner_obj.owner_name} has been assigned a new '\n f'{content_type.lower()}!', 'success')\n return redirect(url_for('content'))\n\n # GET all existing contents and render it in the view\n contents = Content.get_all_content()\n\n # for each content __valid_days is tested for negative values\n\n # view is rendered with all contents and owner data\n return render_template('content.html',\n title='Content Form',\n form=form,\n contents=contents)",
"def edit(slug):\n entry = get_object_or_404(Entry, Entry.slug == slug)\n if request.method == 'POST':\n if request.form.get('title'):\n entry.title = request.form.get('title')\n if request.form.get('content'):\n entry.content = request.form.get('content')\n entry.published = request.form.get('published') or False\n entry.save()\n\n flash('Entry saved successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n return render_template('edit.html', entry=entry)",
"def edit(request,entry_id):\n assert isinstance(request, HttpRequest)\n try:\n entry = Entry.objects.get(pk=entry_id)\n except Entry.DoesNotExist:\n raise Http404(\"指定されたブログが存在しません。\")\n if not request.user or request.user.pk != entry.member.pk: # ブログ作成者以外は編集できない\n return HttpResponseForbidden() #アドレスをコピペしなければ通常は起こらないため例外処理で済ませておく。\n\n if request.method == 'POST': # フォームが提出された\n form = EntryForm(request.POST, instance = entry) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n form.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm(instance = entry) # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の編集',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'更新',\n 'entry_pk':entry.pk,\n 'current_user':request.user,\n })",
"def handle_edit_post(post_id):\n edited_post = Post.query.get_or_404(post_id)\n\n edited_post.title = request.form['post-title']\n edited_post.content = request.form['post-content']\n\n db.session.add(edited_post)\n db.session.commit()\n\n return redirect(f\"/users/{edited_post.user_id}\")",
"def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))",
"def edit(ctx, docid, password):\n coll = db.get_document_collection(ctx)\n config = ctx.obj[\"config\"]\n\n doc, docid = db.get_document_by_id(ctx, docid)\n title = doc[\"title\"]\n\n template, c = db.get_content(ctx, doc, password=password)\n\n content, tmpfile = utils.get_content_from_editor(config[\"editor\"], template=template)\n d = datetime.datetime.now()\n\n if doc[\"encrypted\"] is True:\n title = utils.get_title_from_content(content)\n content = c.encrypt_content(content.decode(\"utf-8\").encode(\"utf-8\"))\n else:\n if not \"links\" in doc[\"categories\"]:\n title = utils.get_title_from_content(content)\n\n if isinstance(template, unicode):\n content = content.decode(\"utf-8\")\n\n if content != template:\n doc[\"content\"] = content\n doc[\"title\"] = title\n doc[\"updated\"] = d\n if validate(doc):\n coll.save(doc)\n else:\n utils.log_error(\"Validation of the updated object did not succeed\")\n\n transaction.log(ctx, docid, \"edit\", title)\n utils.log_info(\"Document \\\"%s\\\" updated.\" % title)\n else:\n utils.log_info(\"No changes detected for \\\"%s\\\"\" % title)\n\n utils.clean_tmpfile(tmpfile)\n\n return True",
"def edit_page(self, path, title, content=None, html_content=None,\n author_name=None, author_url=None, return_content=False):\n if content is None:\n content = html_to_nodes(html_content)\n\n content_json = json_dumps(content)\n\n return self._telegraph.method('editPage', path=path, values={\n 'title': title,\n 'author_name': author_name,\n 'author_url': author_url,\n 'content': content_json,\n 'return_content': return_content\n })"
] | [
"0.7016044",
"0.6976376",
"0.6733984",
"0.6700269",
"0.66533476",
"0.6566476",
"0.648935",
"0.6471998",
"0.64241326",
"0.64086014",
"0.6263412",
"0.6224103",
"0.6202934",
"0.6191035",
"0.6189695",
"0.6159472",
"0.6140995",
"0.61282784",
"0.61214674",
"0.61054313",
"0.6101587",
"0.6091253",
"0.60822564",
"0.6074643",
"0.60736597",
"0.60498774",
"0.6035604",
"0.6032458",
"0.60244983",
"0.60145414"
] | 0.7805241 | 0 |
Processing for the endpoint /content/delete to delete a content | def handle_content_delete(content_id):
content = Content.find_by_id(content_id)
# flash error message if content does not exist
if not content:
flash(f'Content does not exist!', 'danger')
return 'not deleted', 404
# content is deleted and user is redirected (redirect code in content.js)
# deleting content errors handled
try:
content.delete_content()
except HTTPException:
return "Server cannot delete the content at this time", 500
flash(f'{content.content_name} has been deleted!', 'success')
return 'deleted', 202 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(request, content_type, object_id):\n user = request.user\n content_type_object = ContentType.objects.get(id = content_type)\n node = content_type_object.model_class().objects.get(id = object_id)\n community_wiki.delete_content(node)\n \n redirect_url = reverse('content-list-redirect', args=[content_type_object.id])\n return http.HttpResponseRedirect(redirect_url)",
"def delete(self, endpoint, content=None, params=None):\n\t\treturn self._call(\"DELETE\", endpoint, content, params)",
"def delete(self):\n self.request().delete()",
"def delete(self, *args, **kwargs):\n return self.handle_delete_request()",
"def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)",
"def delete(self, *args, **kwargs):\n self.delete_relatives()\n old_content = self.content\n super().delete(*args, **kwargs)\n if old_content.isOrphaned():\n old_content.delete()",
"def do_DELETE(self): # pylint: disable=missing-docstring\n self._set_headers(204)\n (resource, id) = self.parse_url(self.path)\n\n if resource == \"comments\":\n delete_comment(id)\n elif resource == \"posts\":\n delete_post(id)\n elif resource == \"categories\":\n delete_category(id)\n elif resource == \"posttags\":\n remove_tag(id)\n \n self.wfile.write(\"\".encode())",
"def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)",
"def delete():",
"def delete(self, request , pk=None): \n return Response({'message':'DELETE'})",
"def delete(self, *args, **kwargs):\n pass",
"def delete(self, *args, **kwargs):\n pass",
"async def delete(self, delete: TPayload) -> None:",
"def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass",
"def delete(self):\n ...",
"def delete(self, *args, **kwargs) -> Any:\n pass",
"def view_delete():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )",
"def delete(self, url):\n return self.request(url, \"DELETE\")",
"def delete(self, data):\r\n pass",
"def delete(self):\r\n return http.Request('DELETE', '{0}'.format(\r\n self.get_url())), parsers.parse_json",
"def delete(self):\n if jwthandler.authorize_action(self, 1) == False:\n return None\n\n userdata = jwthandler.decode_userdata(self.request.headers[\"Authorization\"])\n\n body_categories = {\"link_id\": 1}\n link_dict = errorutil.check_fields(self.request.body.decode(), body_categories, self)\n\n if link_dict == False or linkutil.delete_link(link_dict[\"link_id\"], self) == False:\n return None\n\n formatted_message = loggerhandler.form_delete_message_dictionary(userdata, \n \"link\", \n link_dict[\"link_id\"])\n\n\n loggerhandler.log_message(\"delete\", formatted_message)\n\n self.write({\"message\":\"Success\"})",
"def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n return JsonResponse({'status': 'ok'})",
"def delete(self, path):\n params = request.args.to_dict()\n if params.get(\"instances\"):\n int_list = params.get(\"instances\")\n return items_delete_response(path, int_list)\n abort(405)",
"def delete(self, _id):",
"def test_delete_content(self):\n values = {\n \"name\": \"Content 1\",\n \"description\": \"Content's Description\",\n \"content_file\": SimpleUploadedFile(\n \"uploaded_file_name\", \"This will be the contents of the uploaded file.\".encode()\n ),\n \"updated_time\": timezone.now()\n }\n content1 = Content(**values)\n content1.content_file_uploaded = True\n content1.save()\n self.assertTrue(os.path.exists(content1.content_file.path))\n content1.delete()\n self.assertFalse(os.path.exists(content1.content_file.path))",
"def delete(self, med_id):\n query = medic_queries().del_med_by_id()\n\tvalue = get_service().del_content(query,[med_id])\n if value != 1:\n return jsonify(status=404)\n return jsonify(status=200)",
"def delete(self, path):\n req_url = self.normalize_cdmi_url(path)\n res = requests.delete(req_url, auth=self.auth, verify=False)\n if res.status_code == 204:\n return Response(0, \"ok\")\n else:\n return Response(res.status_code, res)",
"def delete(self, request, id, format=None):\n posts = self.get_object(id)\n posts.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def delete_document(self, portal_name, content_id):\n r = requests.delete('/'.join([self.base_url,\n self.DOCUMENTS_ENDPOINT,\n portal_name,\n str(content_id)]))\n return r.json()",
"def _delete(self, url):\n return self._request(url, method=\"DELETE\")"
] | [
"0.76685023",
"0.72205293",
"0.7062801",
"0.70419425",
"0.70234096",
"0.7012189",
"0.69226223",
"0.69223297",
"0.6868076",
"0.6860735",
"0.68212754",
"0.68212754",
"0.6745534",
"0.6732339",
"0.67283106",
"0.66764003",
"0.6665684",
"0.6665189",
"0.66566366",
"0.6643468",
"0.6640182",
"0.66122586",
"0.6593503",
"0.6588883",
"0.65877926",
"0.6572077",
"0.65566826",
"0.6554073",
"0.654697",
"0.6525587"
] | 0.7722003 | 0 |
Reads the licence from the specified fileName. | def Read(self, fileName: str) -> None:
try:
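# only licence files with a '.nls1' extension inside the local 'Licences' directory are parsed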
if os.path.isfile(os.path.join(os.getcwd(), 'Licences', fileName)) and (fileName.endswith('.nls1')):
self.m_Licence1 = ElementTree.parse(os.path.join(os.getcwd(), 'Licences', fileName)).getroot()
except FileNotFoundError:
raise FileNotFoundError("Licence file: " + fileName + " not found.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def license(p):\n # Input file\n f = '/'.join([p, 'collector.stats'])\n check_path(f)\n\n # Open file with universal newline support\n with open(f, 'rU') as fh:\n for line in fh.readlines():\n if 'License key' in line:\n license = line.split(':')[1].strip()\n break\n\n return license",
"def read_from_file(self, filename: str) -> None:",
"def load(cls, filename):\n\n licenses = []\n\n address_manager = REGISTRY['address_manager']\n error_manager = REGISTRY['error_manager']\n\n license_numbers = []\n\n reader = csv.reader(open(filename, 'rU'))\n try:\n headers = reader.next() # Text column headers\n except StopIteration:\n syslog.syslog('merge.py: Empty file %s' % filename)\n return licenses\n \n if len(headers) != 15:\n raise InvalidInput('Business License file should have ' +\n 'exactly 15 columns. Found %d.' % len(headers))\n\n for line in reader:\n business_license = BusinessLicense(line)\n\n if business_license.license_number in license_numbers:\n # Silently skip duplicates\n #error_manager.add(business_license,\n # 'Duplicate license number')\n continue\n\n license_numbers.append(business_license.license_number)\n\n if not business_license.is_valid_license_type():\n error_manager.add(business_license,\n 'Invalid license type')\n continue\n if not business_license.is_valid_business_name():\n error_manager.add(business_license,\n 'Business name is on ignore list')\n continue\n\n if address_manager.is_in_strathcona(business_license.address):\n licenses.append(business_license)\n else:\n error_manager.add(business_license,\n 'Not in Strathcona or invalid address')\n\n licenses.sort(key=operator.attrgetter('license_number'))\n\n return licenses",
"def readFromFile(filename):\n raise NotImplementedError",
"def readFile(fileName):\n with open(fileName, 'r', encoding='utf-8') as f:\n text = f.read()\n return text",
"def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()",
"def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()",
"def readFromTextFile(self, file_name):\n with open(file_name, 'r') as file_obj:\n return file_obj.read()",
"def read(self, filename):\n pass",
"def read(self, filename):\n pass",
"def set_pkg_license_from_file(self, doc, lic):\n self.assert_package_exists()\n if validations.validate_lics_from_file(lic):\n doc.package.licenses_from_files.append(lic)\n return True\n else:\n raise SPDXValueError('Package::LicensesFromFile')",
"def read_file(filename):\n open_kwargs = {}\n if sys.version_info.major == 3:\n open_kwargs = {'encoding': 'utf-8'}\n\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n with open(filepath, **open_kwargs) as filecontents:\n return filecontents.read()",
"def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()",
"def open_and_read_file(file_path):\n\n source_material = open(file_path).read()\n return source_material",
"def get_license():\n repo_fs()\n return LICENSE",
"def ReadFile(self, filename):\r\n file = open(filename, 'rb')\r\n result = \"\"\r\n try:\r\n result = file.read()\r\n finally:\r\n file.close()\r\n return result",
"def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()",
"def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()",
"def read_file(filename):\n with open(filename) as fp:\n return fp.read()",
"def ReadFile(self, filename):\n file = open(filename, 'rb')\n result = \"\"\n try:\n result = file.read()\n finally:\n file.close()\n return result",
"def read(self, filename):\n raise NotImplementedError",
"def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') as f:\n return f.read()",
"def read_file(self, file_name):\n\n with open(file_name, 'r') as file_input:\n file_content = file_input.read()\n return file_content",
"def read(self, filename): # real signature unknown; restored from __doc__\n pass",
"def read_file(filename):\n return open(filename).read()",
"def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()",
"def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()",
"def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()",
"def get_book(file_name):\n with open(file_name, 'r') as file:\n book = file.read()\n return book",
"def __read_file(self, filename):\n with open(filename) as f:\n content = f.readlines()\n \n return content"
] | [
"0.6428396",
"0.6019847",
"0.59947544",
"0.58736926",
"0.5851565",
"0.58470494",
"0.58166087",
"0.58135986",
"0.5810444",
"0.5810444",
"0.5779063",
"0.5741028",
"0.5730347",
"0.5701212",
"0.5701118",
"0.5668183",
"0.56602037",
"0.5650262",
"0.5648893",
"0.5632953",
"0.56318074",
"0.56109184",
"0.5601454",
"0.55974877",
"0.559456",
"0.5577467",
"0.5577467",
"0.55707914",
"0.5569929",
"0.5569442"
] | 0.7940669 | 0 |
Verifies the licence by comparing it to the signature computed for the licence using the specified public key. | def Verify(self, publicKey: str) -> bool:
if not publicKey:
raise ValueError(str(publicKey))
isValid = False
if self.m_Licence1:
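# the stored signature is removed from the 'Code' element before verifying, since the signature was computed over the licence with that field empty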
signature = self.m_Licence1.find('Code').text
self.m_Licence1.find('Code').text = ''
try:
isValid = RSAVerify().Verify(self.m_Licence1, signature, publicKey)
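# on success, the signature text is put back so the licence element is left unchanged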
self.m_Licence1.find('Code').text = signature
except Exception as ex:
print("Exception 22")
print(ex)
isValid = False
return isValid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def VerifyWithFile(publicKey: str, value: ElementTree.Element) -> bool:\r\n lr = LicenceReader()\r\n lr.m_Licence1 = value\r\n return lr.Verify(publicKey)",
"def can_verify(self, public_key):\n result = self._lib_vscf_ecc.vscf_ecc_can_verify(self.ctx, public_key.c_impl)\n return result",
"def verify(self, signature, body, external_aad, public_key):",
"def _verify(pubkey: SupportedKeyTypes, sig: bytes, filehash: bytes, hashfunc: hashes.HashAlgorithm) -> None:\n if isinstance(pubkey, RSAPublicKey):\n pubkey.verify(sig, filehash, padding.PKCS1v15(), Prehashed(hashfunc))\n elif isinstance(pubkey, EllipticCurvePublicKey):\n pubkey.verify(sig, filehash, ec.ECDSA(Prehashed(hashfunc)))",
"def verify(self):\n if not self.public_key:\n self.fetch_public_key()\n data = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}data\").text\n sig = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}sig\").text\n sig_contents = '.'.join([\n data,\n b64encode(b\"application/xml\").decode(\"ascii\"),\n b64encode(b\"base64url\").decode(\"ascii\"),\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n ])\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key))\n if not cipher.verify(sig_hash, urlsafe_b64decode(sig)):\n raise SignatureVerificationError(\"Signature cannot be verified using the given public key\")",
"def rsa_verify(cypher, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions. \r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.verify)",
"def verify(public_key, message, signature):\n hasher = SHA256.new(message)\n verifier = PKCS1_v1_5.new(public_key)\n return verifier.verify(hasher, signature)",
"def validate_license(key: str) -> bool:\r\n return bool(\r\n re.match(r'^PB-[A-Z0-9]{8}(?:-[A-Z0-9]{8}){3}$', key)\r\n )",
"def verify(self, msg, sig, key):\n if not isinstance(key, ec.EllipticCurvePublicKey):\n raise TypeError(\"The public key must be an instance of \" \"ec.EllipticCurvePublicKey\")\n self._cross_check(key)\n\n num_bits = key.curve.key_size\n num_bytes = (num_bits + 7) // 8\n if len(sig) != 2 * num_bytes:\n raise ValueError(\"Invalid signature\")\n\n try:\n # cryptography uses ASN.1-encoded signature data; split JWS\n # signature (r||s) and encode before verification\n (r, s) = self._split_raw_signature(sig)\n asn1sig = encode_dss_signature(r, s)\n key.verify(asn1sig, msg, ec.ECDSA(self.hash_algorithm()))\n except InvalidSignature as err:\n raise BadSignature(err)\n else:\n return True",
"def call_backend_verify(self, pubkey: PublicKey, signature: Signature, msg_hash: bytes):\n is_valid_sig = signature.verify_msg_hash(msg_hash, pubkey)\n sig_pubkey = signature.recover_public_key_from_msg_hash(msg_hash)\n\n return is_valid_sig and (sig_pubkey == pubkey)",
"def verify(self, message, signature):\n symkey = self.gen_symkey(message)\n\n # v is the verification value, X is the ring of signatures\n v, X = signature[0], signature[1:]\n\n # permute an X value to a Y value using the g function\n mapper = lambda i: self.g(X[i], self.public_keys[i].e, self.public_keys[i].n)\n\n # map the array of x -> array of y\n Y = map(mapper, range(len(X)))\n\n # XOR the cumulative hash with the next value, then hash that\n reducer = lambda x, i: self.concat_hash(x ^ Y[i], symkey)\n\n # now do the verification:\n # C(k, v, y[]) = E(k, y[r] ^ E(k, y[r-1] ^ E(... ^ E(k, y[0] ^ v)...)))\n r = reduce(reducer, range(self.n_keys), v)\n return r == v",
"def verify_signatures(params, signed_fields_key='signedFields', full_sig_key='signedDataPublicSignature'):\r\n signed_fields = params.get(signed_fields_key, '').split(',')\r\n data = u\",\".join([u\"{0}={1}\".format(k, params.get(k, '')) for k in signed_fields])\r\n signed_fields_sig = processor_hash(params.get(signed_fields_key, ''))\r\n data += u\",signedFieldsPublicSignature=\" + signed_fields_sig\r\n returned_sig = params.get(full_sig_key, '')\r\n if processor_hash(data) != returned_sig:\r\n raise CCProcessorSignatureException()",
"def verify(signature: Signature, pub_key: rsa.RSAPublicKey, msg: bytes) -> bool:\n try:\n pub_key.verify(signature, msg, PADDING, HASH)\n except:\n return False\n return True",
"def _cross_check(self, pub_key):\n if self.curve_name != pub_key.curve.name:\n raise ValueError(\n \"The curve in private key {} and in algorithm {} don't \"\n \"match\".format(pub_key.curve.name, self.curve_name)\n )",
"def verify_signature(signature_object, pubkey_info, content):\n if not CRYPTO: # pragma: no cover\n raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)\n\n formats.GPG_PUBKEY_SCHEMA.check_match(pubkey_info)\n formats.GPG_SIGNATURE_SCHEMA.check_match(signature_object)\n\n handler = SIGNATURE_HANDLERS[pubkey_info[\"type\"]]\n sig_keyid = signature_object[\"keyid\"]\n\n verification_key = pubkey_info\n\n # If the keyid on the signature matches a subkey of the passed key,\n # we use that subkey for verification instead of the master key.\n if sig_keyid in list(pubkey_info.get(\"subkeys\", {}).keys()):\n verification_key = pubkey_info[\"subkeys\"][sig_keyid]\n\n creation_time = verification_key.get(\"creation_time\")\n validity_period = verification_key.get(\"validity_period\")\n\n if (\n creation_time\n and validity_period\n and creation_time + validity_period < time.time()\n ):\n raise KeyExpirationError(verification_key)\n\n return handler.verify_signature(\n signature_object, verification_key, content, SHA256\n )",
"def verification(file_name: str) -> None:\n print(\"Verification process...\")\n file_name = os.path.join('data', file_name)\n\n file1 = open(\"data/key.txt\", \"r\")\n file2 = open(\"data/signature.txt\", \"r\")\n p = int(file1.readline().rstrip())\n q = int(file1.readline().rstrip())\n g = int(file1.readline().rstrip())\n h = int(file1.readline().rstrip())\n\n c1 = int(file2.readline().rstrip())\n c2 = int(file2.readline().rstrip())\n print('c1 = ', c1)\n print('c2 = ', c2)\n\n t1 = sha_hash(file_name)\n print('hash = ', t1)\n inverseC2 = compute_inverse(c2, q)\n t1 = (t1 * inverseC2) % q\n\n t2 = compute_inverse(c2, q)\n t2 = (t2 * c1) % q\n\n valid1 = square_multiply(g, t1, p)\n valid2 = square_multiply(h, t2, p)\n valid = ((valid1 * valid2) % p) % q\n if valid == c1:\n print(\"Valid signature\")\n else:\n print(\"Invalid signature\")",
"def compare_signature(public_key: str, signature: str, content: dict) -> bool:\n\n public_key = import_key(public_key)\n verifier = PKCS1_v1_5.new(public_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n\n return verifier.verify(h, binascii.unhexlify(signature))",
"def verify(cypher, pub_key):\n\n if not isinstance(pub_key, key.PublicKey):\n raise TypeError(\"You must use the public pub_key with verify\")\n\n return gluechops(cypher, pub_key.e, pub_key.n, decrypt_int)",
"def verify(self, digest, sig):\n sig_r = sig[:32]\n sig_s = sig[32:]\n signature = utils.encode_dss_signature(_convert_binary_to_bigint(sig_r), _convert_binary_to_bigint(sig_s))\n try:\n self.public_key_obj.verify(signature, digest, ec.ECDSA(utils.Prehashed(hashes.SHA256())))\n except cryptography.exceptions.InvalidSignature:\n return False\n return True",
"def verify(self, data, signature_b64):\n from base64 import urlsafe_b64decode\n\n if self.sign_public == \"\":\n raise ValueError(\"Error verifying: No public signing key found for {}\".format(self))\n\n signature = urlsafe_b64decode(signature_b64)\n key_public = RsaPublicKey.Read(self.sign_public)\n return key_public.Verify(data, signature)",
"def verify(self, data, signature_b64):\n\n signature = b64decode(signature_b64)\n key_public = RsaPublicKey.Read(self.sign_public)\n return key_public.Verify(data, signature)",
"def verify_signature(self, key, data):\n verify_signature(self, key, data)",
"def validate(msg, pubkey: dict, signature):\n if signature is None:\n print(\"Signature is None. probably cause something other than a string or byte being passed to signer\")\n return False\n try:\n x_int = base64.b85decode(pubkey[\"x\"].encode())\n x_int = int.from_bytes(x_int, \"big\")\n\n y_int = base64.b85decode(pubkey[\"y\"].encode())\n y_int = int.from_bytes(y_int, \"big\")\n except KeyError:\n return False\n\n signature = signature.encode()\n signature = base64.b85decode(signature)\n\n # if it a string\n try:\n hash_of_message = SHA256.new(msg)\n except TypeError:\n hash_of_message = SHA256.new(msg.encode())\n\n try:\n pubkey = ECC.construct(point_x=x_int, point_y=y_int, curve=\"P-256\").public_key()\n verifier = DSS.new(pubkey, mode=\"fips-186-3\")\n verifier.verify(hash_of_message, signature=signature)\n except ValueError:\n return False\n else:\n return True",
"def verifySig(pub, inHash, r, s):\n # See [NSA] 3.4.2\n N = Curve.N\n\n if r <= 0 or s <= 0:\n return False\n\n if r >= N or s >= N:\n return False\n\n e = hashToInt(inHash)\n\n w = crypto.modInv(s, N)\n\n u1 = (e * w) % N\n u2 = (r * w) % N\n\n x1, y1 = Curve.scalarBaseMult(u1)\n x2, y2 = Curve.scalarMult(pub.x, pub.y, u2)\n x, y = Curve.add(x1, y1, x2, y2)\n\n if x == 0 and y == 0:\n return False\n x = x % N\n return x == r",
"def verify_cert(public_key, cert):\n try:\n public_key.verify(\n signature=cert.signature,\n data=cert.tbs_certificate_bytes,\n signature_algorithm=ec.ECDSA(cert.signature_hash_algorithm)\n )\n except:\n return 'failure'\n\n return 'success'",
"def verify_hash(self, public_key, hash_id, digest, signature):\n d_digest = Data(digest)\n d_signature = Data(signature)\n result = self._lib_vscf_ecc.vscf_ecc_verify_hash(self.ctx, public_key.c_impl, hash_id, d_digest.data, d_signature.data)\n return result",
"def verify_signature(self, payload, signature, timestamp, public_key=None):\n timestamped_payload = timestamp + payload\n decoded_signature = Signature.fromBase64(signature)\n\n key = public_key or self.public_key\n return Ecdsa.verify(timestamped_payload, decoded_signature, key)",
"def is_signature_valid(self, data, sig):\n if self.verified == False:\n return False\n\n key = self.publickey_set.filter(\n fingerprint=PublicKey.verify(data, sig).fingerprint,\n ).first()\n return key",
"def verify(key, file, sign):\n\n try:\n key = TomlKeyFormatter().from_string(key.read())\n signature = TomlSignatureFormatter().from_string(sign.read())\n\n if signature.verify(SignableBinaryIO(file), key):\n click.echo(\"---verified---\")\n exit(0)\n else:\n click.echo(\"---denied---\")\n exit(1)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except SignatureFormatError:\n click.echo(\"ERROR: Signature is in bad format\")",
"def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ..."
] | [
"0.6605417",
"0.640331",
"0.6185872",
"0.6178675",
"0.608803",
"0.59822845",
"0.5972749",
"0.5925845",
"0.5922927",
"0.58927023",
"0.58635825",
"0.586296",
"0.58162904",
"0.578368",
"0.5780162",
"0.57581234",
"0.5755524",
"0.570118",
"0.56900156",
"0.56470275",
"0.5636996",
"0.5627519",
"0.56192166",
"0.5616444",
"0.5596299",
"0.5580692",
"0.5564095",
"0.5548061",
"0.55038923",
"0.5485874"
] | 0.7127445 | 0 |
Verifies the specified licence by comparing it to the signature computed for the licence using the specified public key. | def VerifyWithFile(publicKey: str, value: ElementTree.Element) -> bool:
lr = LicenceReader()
lr.m_Licence1 = value
return lr.Verify(publicKey) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Verify(self, publicKey: str) -> bool:\r\n if not publicKey:\r\n raise ValueError(str(publicKey))\r\n isValid = False\r\n if self.m_Licence1:\r\n signature = self.m_Licence1.find('Code').text\r\n self.m_Licence1.find('Code').text = ''\r\n try:\r\n isValid = RSAVerify().Verify(self.m_Licence1, signature, publicKey)\r\n self.m_Licence1.find('Code').text = signature\r\n except Exception as ex:\r\n print(\"Exception 22\")\r\n print(ex)\r\n isValid = False\r\n return isValid",
"def _verify(pubkey: SupportedKeyTypes, sig: bytes, filehash: bytes, hashfunc: hashes.HashAlgorithm) -> None:\n if isinstance(pubkey, RSAPublicKey):\n pubkey.verify(sig, filehash, padding.PKCS1v15(), Prehashed(hashfunc))\n elif isinstance(pubkey, EllipticCurvePublicKey):\n pubkey.verify(sig, filehash, ec.ECDSA(Prehashed(hashfunc)))",
"def can_verify(self, public_key):\n result = self._lib_vscf_ecc.vscf_ecc_can_verify(self.ctx, public_key.c_impl)\n return result",
"def verify(self, signature, body, external_aad, public_key):",
"def verify_signatures(params, signed_fields_key='signedFields', full_sig_key='signedDataPublicSignature'):\r\n signed_fields = params.get(signed_fields_key, '').split(',')\r\n data = u\",\".join([u\"{0}={1}\".format(k, params.get(k, '')) for k in signed_fields])\r\n signed_fields_sig = processor_hash(params.get(signed_fields_key, ''))\r\n data += u\",signedFieldsPublicSignature=\" + signed_fields_sig\r\n returned_sig = params.get(full_sig_key, '')\r\n if processor_hash(data) != returned_sig:\r\n raise CCProcessorSignatureException()",
"def verify(self):\n if not self.public_key:\n self.fetch_public_key()\n data = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}data\").text\n sig = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}sig\").text\n sig_contents = '.'.join([\n data,\n b64encode(b\"application/xml\").decode(\"ascii\"),\n b64encode(b\"base64url\").decode(\"ascii\"),\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n ])\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key))\n if not cipher.verify(sig_hash, urlsafe_b64decode(sig)):\n raise SignatureVerificationError(\"Signature cannot be verified using the given public key\")",
"def rsa_verify(cypher, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions. \r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.verify)",
"def validate_license(key: str) -> bool:\r\n return bool(\r\n re.match(r'^PB-[A-Z0-9]{8}(?:-[A-Z0-9]{8}){3}$', key)\r\n )",
"def call_backend_verify(self, pubkey: PublicKey, signature: Signature, msg_hash: bytes):\n is_valid_sig = signature.verify_msg_hash(msg_hash, pubkey)\n sig_pubkey = signature.recover_public_key_from_msg_hash(msg_hash)\n\n return is_valid_sig and (sig_pubkey == pubkey)",
"def verify(public_key, message, signature):\n hasher = SHA256.new(message)\n verifier = PKCS1_v1_5.new(public_key)\n return verifier.verify(hasher, signature)",
"def verify(self, msg, sig, key):\n if not isinstance(key, ec.EllipticCurvePublicKey):\n raise TypeError(\"The public key must be an instance of \" \"ec.EllipticCurvePublicKey\")\n self._cross_check(key)\n\n num_bits = key.curve.key_size\n num_bytes = (num_bits + 7) // 8\n if len(sig) != 2 * num_bytes:\n raise ValueError(\"Invalid signature\")\n\n try:\n # cryptography uses ASN.1-encoded signature data; split JWS\n # signature (r||s) and encode before verification\n (r, s) = self._split_raw_signature(sig)\n asn1sig = encode_dss_signature(r, s)\n key.verify(asn1sig, msg, ec.ECDSA(self.hash_algorithm()))\n except InvalidSignature as err:\n raise BadSignature(err)\n else:\n return True",
"def verify_signature(signature_object, pubkey_info, content):\n if not CRYPTO: # pragma: no cover\n raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)\n\n formats.GPG_PUBKEY_SCHEMA.check_match(pubkey_info)\n formats.GPG_SIGNATURE_SCHEMA.check_match(signature_object)\n\n handler = SIGNATURE_HANDLERS[pubkey_info[\"type\"]]\n sig_keyid = signature_object[\"keyid\"]\n\n verification_key = pubkey_info\n\n # If the keyid on the signature matches a subkey of the passed key,\n # we use that subkey for verification instead of the master key.\n if sig_keyid in list(pubkey_info.get(\"subkeys\", {}).keys()):\n verification_key = pubkey_info[\"subkeys\"][sig_keyid]\n\n creation_time = verification_key.get(\"creation_time\")\n validity_period = verification_key.get(\"validity_period\")\n\n if (\n creation_time\n and validity_period\n and creation_time + validity_period < time.time()\n ):\n raise KeyExpirationError(verification_key)\n\n return handler.verify_signature(\n signature_object, verification_key, content, SHA256\n )",
"def verify(self, message, signature):\n symkey = self.gen_symkey(message)\n\n # v is the verification value, X is the ring of signatures\n v, X = signature[0], signature[1:]\n\n # permute an X value to a Y value using the g function\n mapper = lambda i: self.g(X[i], self.public_keys[i].e, self.public_keys[i].n)\n\n # map the array of x -> array of y\n Y = map(mapper, range(len(X)))\n\n # XOR the cumulative hash with the next value, then hash that\n reducer = lambda x, i: self.concat_hash(x ^ Y[i], symkey)\n\n # now do the verification:\n # C(k, v, y[]) = E(k, y[r] ^ E(k, y[r-1] ^ E(... ^ E(k, y[0] ^ v)...)))\n r = reduce(reducer, range(self.n_keys), v)\n return r == v",
"def verifySig(pub, inHash, r, s):\n # See [NSA] 3.4.2\n N = Curve.N\n\n if r <= 0 or s <= 0:\n return False\n\n if r >= N or s >= N:\n return False\n\n e = hashToInt(inHash)\n\n w = crypto.modInv(s, N)\n\n u1 = (e * w) % N\n u2 = (r * w) % N\n\n x1, y1 = Curve.scalarBaseMult(u1)\n x2, y2 = Curve.scalarMult(pub.x, pub.y, u2)\n x, y = Curve.add(x1, y1, x2, y2)\n\n if x == 0 and y == 0:\n return False\n x = x % N\n return x == r",
"def verify(signature: Signature, pub_key: rsa.RSAPublicKey, msg: bytes) -> bool:\n try:\n pub_key.verify(signature, msg, PADDING, HASH)\n except:\n return False\n return True",
"def compare_signature(public_key: str, signature: str, content: dict) -> bool:\n\n public_key = import_key(public_key)\n verifier = PKCS1_v1_5.new(public_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n\n return verifier.verify(h, binascii.unhexlify(signature))",
"def validate(msg, pubkey: dict, signature):\n if signature is None:\n print(\"Signature is None. probably cause something other than a string or byte being passed to signer\")\n return False\n try:\n x_int = base64.b85decode(pubkey[\"x\"].encode())\n x_int = int.from_bytes(x_int, \"big\")\n\n y_int = base64.b85decode(pubkey[\"y\"].encode())\n y_int = int.from_bytes(y_int, \"big\")\n except KeyError:\n return False\n\n signature = signature.encode()\n signature = base64.b85decode(signature)\n\n # if it a string\n try:\n hash_of_message = SHA256.new(msg)\n except TypeError:\n hash_of_message = SHA256.new(msg.encode())\n\n try:\n pubkey = ECC.construct(point_x=x_int, point_y=y_int, curve=\"P-256\").public_key()\n verifier = DSS.new(pubkey, mode=\"fips-186-3\")\n verifier.verify(hash_of_message, signature=signature)\n except ValueError:\n return False\n else:\n return True",
"def verify(self, digest, sig):\n sig_r = sig[:32]\n sig_s = sig[32:]\n signature = utils.encode_dss_signature(_convert_binary_to_bigint(sig_r), _convert_binary_to_bigint(sig_s))\n try:\n self.public_key_obj.verify(signature, digest, ec.ECDSA(utils.Prehashed(hashes.SHA256())))\n except cryptography.exceptions.InvalidSignature:\n return False\n return True",
"def _cross_check(self, pub_key):\n if self.curve_name != pub_key.curve.name:\n raise ValueError(\n \"The curve in private key {} and in algorithm {} don't \"\n \"match\".format(pub_key.curve.name, self.curve_name)\n )",
"def verification(file_name: str) -> None:\n print(\"Verification process...\")\n file_name = os.path.join('data', file_name)\n\n file1 = open(\"data/key.txt\", \"r\")\n file2 = open(\"data/signature.txt\", \"r\")\n p = int(file1.readline().rstrip())\n q = int(file1.readline().rstrip())\n g = int(file1.readline().rstrip())\n h = int(file1.readline().rstrip())\n\n c1 = int(file2.readline().rstrip())\n c2 = int(file2.readline().rstrip())\n print('c1 = ', c1)\n print('c2 = ', c2)\n\n t1 = sha_hash(file_name)\n print('hash = ', t1)\n inverseC2 = compute_inverse(c2, q)\n t1 = (t1 * inverseC2) % q\n\n t2 = compute_inverse(c2, q)\n t2 = (t2 * c1) % q\n\n valid1 = square_multiply(g, t1, p)\n valid2 = square_multiply(h, t2, p)\n valid = ((valid1 * valid2) % p) % q\n if valid == c1:\n print(\"Valid signature\")\n else:\n print(\"Invalid signature\")",
"def verify(cypher, pub_key):\n\n if not isinstance(pub_key, key.PublicKey):\n raise TypeError(\"You must use the public pub_key with verify\")\n\n return gluechops(cypher, pub_key.e, pub_key.n, decrypt_int)",
"def verify_signature(self, payload, signature, timestamp, public_key=None):\n timestamped_payload = timestamp + payload\n decoded_signature = Signature.fromBase64(signature)\n\n key = public_key or self.public_key\n return Ecdsa.verify(timestamped_payload, decoded_signature, key)",
"def verify_hash(self, public_key, hash_id, digest, signature):\n d_digest = Data(digest)\n d_signature = Data(signature)\n result = self._lib_vscf_ecc.vscf_ecc_verify_hash(self.ctx, public_key.c_impl, hash_id, d_digest.data, d_signature.data)\n return result",
"def verify(self, data, signature_b64):\n\n signature = b64decode(signature_b64)\n key_public = RsaPublicKey.Read(self.sign_public)\n return key_public.Verify(data, signature)",
"def verify(self, data, signature_b64):\n from base64 import urlsafe_b64decode\n\n if self.sign_public == \"\":\n raise ValueError(\"Error verifying: No public signing key found for {}\".format(self))\n\n signature = urlsafe_b64decode(signature_b64)\n key_public = RsaPublicKey.Read(self.sign_public)\n return key_public.Verify(data, signature)",
"def is_signature_valid(self, data, sig):\n if self.verified == False:\n return False\n\n key = self.publickey_set.filter(\n fingerprint=PublicKey.verify(data, sig).fingerprint,\n ).first()\n return key",
"def verify_signature(self, key, data):\n verify_signature(self, key, data)",
"def check_sig(filename):\n pipe = Popen([\"gpg\", \"--verify\", filename], stderr=PIPE)\n pipe.stderr.read()\n status = pipe.wait()\n if status != 0:\n raise BadSignature('%s is not properly signed' % filename)",
"def verify_cert(public_key, cert):\n try:\n public_key.verify(\n signature=cert.signature,\n data=cert.tbs_certificate_bytes,\n signature_algorithm=ec.ECDSA(cert.signature_hash_algorithm)\n )\n except:\n return 'failure'\n\n return 'success'",
"def _asymmetric_verify(self, signature: bytes, filehash: bytes, filehash_type: str) -> bool:\n\n siglen = len(signature)\n\n # The data are in big endian\n fmt = \">BBBIH\"\n hdrlen = struct.calcsize(fmt)\n if len(signature) < hdrlen:\n logger.warning(\"Signature header is too short\")\n return False\n _, _, hash_algo, keyidv2, sig_size = struct.unpack(fmt, signature[:hdrlen])\n\n siglen -= hdrlen\n\n if siglen != sig_size:\n logger.warning(\"Malformed signature\")\n return False\n\n hashfunc = HASH_FUNCS.get(hash_algo)\n if not hashfunc:\n logger.warning(\"Unsupported hash algo with id '%d'\", hash_algo)\n return False\n\n if filehash_type != hashfunc().name:\n logger.warning(\n \"Mismatching filehash type %s and ima signature hash used %s\", filehash_type, hashfunc().name\n )\n return False\n\n # Try all the keyrings until we find one with a key with the given keyidv2\n pubkey = None\n for keyring in self.get_all_keyrings():\n pubkey = keyring.get_pubkey_by_keyidv2(keyidv2)\n if pubkey:\n break\n\n if not pubkey:\n logger.warning(\"No key with id 0x%08x available\", keyidv2)\n return False\n\n try:\n ImaKeyrings._verify(pubkey, signature[hdrlen:], filehash, hashfunc())\n except InvalidSignature:\n return False\n return True"
] | [
"0.699972",
"0.60788673",
"0.60085154",
"0.5997743",
"0.5859552",
"0.5776492",
"0.5746808",
"0.5743002",
"0.57144624",
"0.5711558",
"0.56908786",
"0.5650581",
"0.56451046",
"0.56195503",
"0.5615559",
"0.5570306",
"0.5556296",
"0.5545048",
"0.5536732",
"0.5535945",
"0.55258125",
"0.54812115",
"0.5453127",
"0.543865",
"0.54345393",
"0.5388969",
"0.53850716",
"0.53534317",
"0.5332369",
"0.5331245"
] | 0.64155024 | 1 |
Parse a string with a size into a number of bytes. I.e. parses "10m", "10MB", "10 M" and other variations into the number of bytes in ten megabytes. Floating-point numbers are rounded to the nearest byte. | def _size_to_bytes(size):
units = 'KMGTPEZY' # note that position of letter is same as power - 1
match = re.search(r'^\s*([-+]?\s*[0-9]*\.?[0-9]*)\s*([' + units + r']?\s*B?\s*S?)\s*', size, re.IGNORECASE)
if match is None or match.group(1) == '':
raise ValueError("size string not in proper format 'number [kmgtpezy]': " + size)
mem_size = float(re.sub(r'\s*', '', match.group(1)))
unit = re.sub(r'\s*', '', match.group(2)).upper()
unit = re.sub(r'B?S?$', '', unit) # remove trailing units symbol
if unit == '':
unit_pow = 0
else:
unit_pow = units.find(unit) + 1
byte_size = int(round(mem_size * (1024 ** unit_pow)))
return byte_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_bytes_size(size_str):\n\n m = BYTES_REGEX.fullmatch(size_str.lower())\n if m:\n number = int(m.group(1))\n\n if m.group(2) is not None:\n unit = m.group(2)\n conversion = SIZE_UNITS.get(unit)\n if conversion:\n return conversion * number\n return number\n else:\n raise ValueError(\"Invalid size string: {}\".format(size_str))",
"def bytes_from_str( size_str ):\n unit_conversions = { char: 1024**power for ( power, char ) in enumerate( [ \"B\", \"K\", \"M\", \"G\", \"T\" ] ) }\n try:\n coeff = unit_conversions[ size_str.upper()[-1] ]\n size_str = size_str[:-1]\n except KeyError:\n coeff = 1\n try:\n size = float( size_str )\n except ValueError:\n print( \"Invalid size string: {}\".format( size_str ) )\n exit( -1 )\n return coeff * size",
"def parse_size(size_str):\n try:\n return int(size_str)\n except ValueError, e:\n pass\n\n try:\n num = int(size_str[:-1])\n except ValueError, e:\n raise VMBuilderUserError(\"Invalid size: %s\" % size_str)\n\n if size_str[-1:] == 'g' or size_str[-1:] == 'G':\n return num * 1024\n if size_str[-1:] == 'm' or size_str[-1:] == 'M':\n return num\n if size_str[-1:] == 'k' or size_str[-1:] == 'K':\n return num / 1024",
"def size_human2byte(s_str):#{{{\n s_byte = None\n if s_str.isdigit():\n s_byte = int(s_str)\n else:\n s_str = s_str.upper()\n match = re.match(r\"([0-9]+)([A-Z]+)\", s_str, re.I)\n if match:\n items = match.groups()\n size = int(items[0])\n if items[1] in [\"B\"]:\n s_byte = size\n elif items[1] in [\"K\", \"KB\"]:\n s_byte = size*1024\n elif items[1] in [\"M\", \"MB\"]:\n s_byte = size*1024*1024\n elif items[1] in [\"G\", \"GB\"]:\n s_byte = size*1024*1024*1024\n else:\n print(\"Bad maxsize argument:\", s_str, file=sys.stderr)\n return -1\n else:\n print(\"Bad maxsize argument:\", s_str, file=sys.stderr)\n return -1\n return s_byte",
"def anySizeToBytes(size_string):\n # separate integer from unit\n try:\n size, unit = size_string.split()\n except Exception:\n try:\n size = size_string.strip()\n unit = ''.join([c for c in size if c.isalpha()])\n if len(unit) > 0:\n size = size[:-len(unit)]\n except Exception:\n return -1\n if len(size) == 0:\n return -1\n size = float(size)\n if len(unit) == 0:\n return int(size)\n short_unit = unit.upper()[0]\n\n # convert\n units_dict = {'T': 40, 'G': 30, 'M': 20, 'K': 10}\n if short_unit in units_dict:\n size = size * 2**units_dict[short_unit]\n return int(size)",
"def parse_size(size,b=1024,u='B',pre=['']+[p for p in'KMGTPEZY']):\n intsize, unit = extract_num_unit(size)\n\n # Account for 10B vs 10KB when looking for base\n if len(unit) == len(u):\n base = unit\n else:\n base = unit[1:]\n\n # Check if we know this unit's base, otherwise use default\n if base in unit_base:\n b = unit_base[base]\n pow = { k+base:v for v, k in enumerate(pre) }\n\n return float(intsize)*(b**pow[unit])",
"def convertFromBytes(size, unit):\n\tif (unit == 'kb'):\n\t\treturn size / 10000\n\telif (unit == 'mb'):\n\t\treturn size / 1000000\n\telif (size == 'gb'):\n\t\treturn size / 1000000000",
"def parse_size(text, unit):\n\n text = text.strip()\n text = text.upper()\n unit = unit.upper()\n\n # First, handle the suffixes\n if text.endswith('B'):\n text = text[:-1]\n if text.endswith('I'):\n text = text[:-1]\n\n if not text:\n return ValueError('Empty size')\n\n if text[-1] in _SIZE_FACTORS:\n factor = _SIZE_FACTORS[text[-1]]\n text = text[:-1]\n else:\n factor = _SIZE_FACTORS[unit]\n\n try:\n value = float(text) * factor\n except ValueError:\n raise ValueError(\n 'Cannot parse \"{}\" as {}iB value.'.format(text, unit)\n )\n\n if value % _SIZE_FACTORS[unit]:\n raise ValueError('Value must be multiple of 1 {}iB'.format(unit))\n return int(value / _SIZE_FACTORS[unit])",
"def parse_size(text, unit):\n\n text = text.strip()\n text = text.upper()\n unit = unit.upper()\n\n # First, handle the suffixes\n if text.endswith('B'):\n text = text[:-1]\n if text.endswith('I'):\n text = text[:-1]\n\n if not text:\n return ValueError('Empty size')\n\n if text[-1] in _SIZE_FACTORS:\n factor = _SIZE_FACTORS[text[-1]]\n text = text[:-1]\n else:\n factor = _SIZE_FACTORS[unit]\n\n try:\n value = float(text) * factor\n except ValueError:\n raise ValueError(\n 'Cannot parse \"{}\" as {}iB value.'.format(text, unit)\n )\n\n if value % _SIZE_FACTORS[unit]:\n raise ValueError('Value must be multiple of 1 {}iB'.format(unit))\n return int(value / _SIZE_FACTORS[unit])",
"def human_to_bytes(size):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P')\n unit = size[-1:].strip().upper()\n if unit == \"B\":\n # Strip off trailing 'b' and see if we've got another unit\n size = size[:-1]\n unit = size[-1:].strip().upper()\n if unit in symbols:\n num = size[:-1]\n else:\n unit = \"B\"\n num = size\n else:\n # Assume size in bytes if no units specified?\n unit = \"B\"\n num = size\n assert num.isdigit() and unit in symbols\n num = float(num)\n prefix = {symbols[0]:1}\n for i, size in enumerate(symbols[1:]):\n prefix[size] = 1 << (i+1)*10\n return int(num * prefix[unit])",
"def get_size(size):\n if size.isdigit():\n return int(size)\n\n def do_get_size(num, unit):\n u = units[unit]\n if num.find('.') == -1:\n return int(num) * u\n return int(float(num) * u)\n\n s = size.strip().upper()\n if s.find(' ') == -1:\n num, unit = re.sub(r\"([\\d.]+)\", r\"\\1 \", s).split()\n else:\n num, unit = s.split()\n\n try:\n return do_get_size(num, unit)\n except KeyError:\n\traise Exception('unknown size unit[%s]' % size)",
"def memstr_to_kbytes(text):\r\n kilo = 1024\r\n units = dict(K=1, M=kilo, G=kilo ** 2)\r\n try:\r\n size = int(units[text[-1]] * float(text[:-1]))\r\n except (KeyError, ValueError):\r\n raise ValueError(\r\n \"Invalid literal for size give: %s (type %s) should be \"\r\n \"alike '10G', '500M', '50K'.\" % (text, type(text))\r\n )\r\n return size",
"def convert_file_size_string(value):\n # list of file format sizes\n file_format_sizes = (\"kB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n # dictionary mapping to multiplier\n file_format_scale = {\"B\" : 1,\n \"kB\" : 1e3,\n \"MB\" : 1e6,\n \"GB\" : 1e9,\n \"TB\" : 1e12,\n \"EB\" : 1e15,\n \"ZB\" : 1e18,\n \"YB\" : 1e21}\n if isinstance(value, str):\n if value.endswith(file_format_sizes):\n suffix = value[-2:]\n size = int(value[:-2])\n elif value[-1] == \"B\":\n suffix = \"B\"\n size = int(value[:-1])\n else:\n suffix = \"B\"\n size = int(value)\n # multiply by scalar\n size *= file_format_scale[suffix]\n return size\n else:\n return value",
"def to_bytes(size):\n size2bytes = {\n \"b\":1, \"bytes\":1, \"byte\":1,\n \"k\":1024, \"kib\":1024, \"kb\":1000,\n \"m\": 1024**2, \"mib\": 1024**2, \"mb\": 1000**2,\n \"g\": 1024**3, \"gib\": 1024**3, \"gb\": 1000**3,\n \"t\": 1024**4, \"tib\": 1024**4, \"tb\": 1000**4,\n \"p\": 1024**5, \"pib\": 1024**5, \"pb\": 1000**5,\n \"e\": 1024**6, \"eib\": 1024**6, \"eb\": 1000**6,\n \"z\": 1024**7, \"zib\": 1024**7, \"zb\": 1000**7,\n \"y\": 1024**8, \"yib\": 1024**8, \"yb\": 1000**8\n }\n \n size = size.replace(' ','')\n match = re.search('(?P<size>[0-9.]+)(?P<units>[a-zA-Z]+)$', size)\n \n if match:\n human_units = match.group('units').lower()\n human_units = human_units.lstrip().rstrip()\n scaling_factor = size2bytes[human_units]\n bytes = int(math.ceil(scaling_factor * float(match.group('size'))))\n else:\n # Cannot parse units,\n # cannot convert value\n # into bytes\n return None\n \n return bytes",
"def bytes_to_size(size):\n if not size >> 10 or size < 0:\n return str(size)\n elif not size >> 20:\n return '{:.2f}KB'.format(size / 1024.0)\n elif not size >> 30:\n return '{:.2f}MB'.format(size / (1024.0 ** 2))\n elif not size >> 40:\n return '{:.2f}GB'.format(size / (1024.0 ** 3))\n else:\n return '{:.2f}TB'.format(size / (1024.0 ** 4))",
"def filter_storage_size_num(size_str):\n\n # pattern: '^[1-9][\\d\\.]*[MGT]B?$', multiplier=1000 (not KiB)\n if size_str.endswith('B'):\n size_str = size_str[:-1]\n try:\n size_num = 1000000\n for multiplier in ['M', 'G', 'T']:\n if size_str.endswith(multiplier):\n return '{:.2f}'.format(size_num * float(size_str[:-1]))\n size_num = size_num * 1000\n return '{:.2f}'.format(float(size_str))\n except ValueError as ex:\n logging.error(size_str + \" is not a valid size string\")\n raise",
"def size_in_mb(size_in_bytes):\n if size_in_bytes < 10**6:\n return size_in_bytes // 1000\n else:\n return size_in_bytes // 10**6",
"def convert_size(size_bytes):\n # Sizes range from B to YiB, \n # warning larger sizes storage\n # may results in blackhole \n size_name = (\n \"B\", \"KiB\", \"MiB\", \n \"GiB\", \"TiB\", \"PiB\", \n \"EiB\", \"ZiB\", \"YiB\"\n )\n if size_bytes == 0: \n return \"0B\"\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return \"{0}{1}\".format(s, size_name[i])",
"def _disk_size_in_gb(_string):\n try:\n value = int(_string)\n except ValueError as e:\n raise argparse.ArgumentTypeError(str(e))\n if value <= 0:\n raise argparse.ArgumentTypeError('Size must be positive value')\n return value",
"def cvtFromKMG(str):\n\n # remember, we already verify sizeset[]\n match = re.match('(\\d+)([kmg]?\\Z)', str, re.I)\n size = int(match.group(1))\n type = match.group(2).lower()\n if type == '':\n objsize = size\n if type == 'k':\n objsize = size * 1024\n elif type == 'm':\n objsize = size * 1024 * 1024\n elif type == 'g':\n objsize = size * 1024 * 1024 * 1024\n return(objsize)",
"def humanbytes(size):\n # https://stackoverflow.com/a/49361727/4723940\n if not size:\n return \"\"\n # 2 ** 10 = 1024\n power = 2**10\n raised_to_pow = 0\n dict_power_n = {0: \"\", 1: \"Ki\", 2: \"Mi\", 3: \"Gi\", 4: \"Ti\"}\n while size > power:\n size /= power\n raised_to_pow += 1\n return str(round(size, 2)) + \" \" + dict_power_n[raised_to_pow] + \"B\"",
"def human_size(size_bytes):\n if size_bytes is 0:\n return \"0B\"\n\n def ln(x):\n n = 99999999\n return n * ((x ** (1/n)) - 1)\n\n def log(x, base):\n result = ln(x)/ln(base)\n return result\n\n exp = int(log(size_bytes, 1024))\n try:\n unit = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")[exp]\n except KeyError:\n unit = \"YB\"\n return \"{} {}\".format(round(size_bytes / (1024 ** exp), 2), unit)",
"def parse_size(size):\n if isinstance(size, int):\n return size\n elif isinstance(size, str):\n if size.isdigit():\n return int(size)\n return None",
"def to_bytes(size, power=1024, sep=' '):\n if not size:\n return 0\n size, suffix = size.split(sep, 1)\n suffix = suffix[0]\n try:\n factor = {\n 'K': power,\n 'M': power**2,\n 'G': power**3,\n 'T': power**4,\n 'P': power**5,\n }[suffix]\n except KeyError:\n factor = 1\n return int(float(size) * factor)",
"def convert_unit(size_in_bytes, unit):\n if unit == 'KB':\n return size_in_bytes/1024\n elif unit == 'MB':\n return size_in_bytes/(1024*1024)\n elif unit == 'GB':\n return size_in_bytes/(1024*1024*1024)\n else:\n return size_in_bytes",
"def get_size(bytes, suffix=\"B\"):\n factor = 1024\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", \"P\"]:\n if bytes < factor:\n return f\"{bytes:.2f}{unit}{suffix}\"\n bytes /= factor",
"def get_size(bytes, suffix=\"B\"):\n factor = 1024\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", \"P\"]:\n if bytes < factor:\n return f\"{bytes:.2f}{unit}{suffix}\"\n bytes /= factor",
"def get_filesize(string):\r\n string = get_sra_xml('SRR3403834')\r\n pattern = re.compile(r'size.*?([0-9.-]+)')\r\n size = re.search(pattern,string)\r\n\r\n return float(size.group(1))/(10**9)",
"def BytesToMb(size):\n if not size:\n return None\n\n if size % constants.BYTES_IN_ONE_MB != 0:\n raise calliope_exceptions.ToolException(\n 'Disk size must be a multiple of 1 MB. Did you mean [{0}MB]?'\n .format(size / constants.BYTES_IN_ONE_MB + 1))\n\n return size / constants.BYTES_IN_ONE_MB",
"def approximate_size(size, a_kilobyte_is_1024_bytes=True):\n if size < 0:\n raise ValueError('number must be non-negative')\n\n multiple = 1024 if a_kilobyte_is_1024_bytes else 1000\n for suffix in SUFFIXES[multiple]:\n if size < multiple:\n return '{0:.1f} {1}'.format(size, suffix)\n size = 1.0 * size / multiple\n\n raise ValueError('number too large')"
] | [
"0.80750585",
"0.7943071",
"0.7917191",
"0.7679579",
"0.74983126",
"0.74863803",
"0.7467127",
"0.74435073",
"0.74435073",
"0.7316791",
"0.72532624",
"0.72482127",
"0.7226874",
"0.7199668",
"0.7170066",
"0.7089149",
"0.7017551",
"0.6896638",
"0.684901",
"0.6813656",
"0.68017435",
"0.6776219",
"0.6766753",
"0.6763738",
"0.6728273",
"0.6719246",
"0.6719246",
"0.67184293",
"0.6703922",
"0.6696732"
] | 0.8139478 | 0 |
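A minimal usage sketch for the _size_to_bytes document above, assuming the function as defined is in scope; the parser is case-insensitive and drops a trailing "B"/"S", so the spellings below all agree:

    assert _size_to_bytes("10m") == 10 * 1024 ** 2
    assert _size_to_bytes("10MB") == 10 * 1024 ** 2
    assert _size_to_bytes("10 M") == 10 * 1024 ** 2
    assert _size_to_bytes("1.5k") == 1536    # floating-point sizes round to the nearest byte
    assert _size_to_bytes("512") == 512      # a bare number is taken as bytes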
Resample audio files in ${task_dir}/48000/ to ${task_dir}/${target_sr}/ | def resample_hear_corpus(task_dir: str, target_sr: int = 16000, num_workers: int = 6):
task_dir: Path = Path(task_dir)
target_audio_dir: Path = task_dir / f"{target_sr}"
if target_audio_dir.is_dir():
logger.info(f"{target_audio_dir} already exist. Do not need to resample")
return
default_audio_dir = task_dir / "48000"
assert default_audio_dir.exists(), f"{default_audio_dir} not found"
split_names = os.listdir(default_audio_dir)
for split_name in sorted(split_names):
split_dir = default_audio_dir / split_name
wav_paths = find_files(split_dir)
tgt_dir = target_audio_dir / split_name
tgt_dir.mkdir(exist_ok=True, parents=True)
def resample(wav_path: str):
wav, sr = torchaudio.load(wav_path)
if sr != target_sr:
resampler = torchaudio.transforms.Resample(sr, target_sr)
wav = resampler(wav)
torchaudio.save(
str(tgt_dir / Path(wav_path).name), wav, sample_rate=target_sr
)
logger.info(f"Resampling {split_dir} to {tgt_dir}:")
Parallel(n_jobs=num_workers)(
delayed(resample)(path) for path in tqdm(wav_paths)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def audio_resample(self, data):\n\n data = np.asarray(data)\n if data.ndim <= 1:\n logging.log_first_n(logging.INFO,\n 'Converting %s sound from shape %s to 2-D' %\n (self._name, data.shape), 5)\n data = np.reshape(data, (-1, 1))\n if data.shape[1] > data.shape[0]:\n logging.log_first_n(logging.INFO,\n 'Transposing %s sound from shape %s' %\n (self._name, data.shape), 5)\n data = np.transpose(data)\n\n # Get half window size in seconds.\n half_window_size = 0.5 * self._window / self._fs_out\n\n # Concatenate and update buffer.\n if self._buff is not None:\n data = np.concatenate((self._buff, data), axis=0)\n tau = self._buff.shape[0]\n else:\n tau = 0\n self._buff = data[-int(self._fs_in * half_window_size):, :]\n\n # Get i/o data dimensions.\n frames_in = data.shape[0]\n frames_out = int(round((frames_in - tau) / self._fs_in * self._fs_out))\n\n # Resample data via moving average.\n data_out = np.zeros((frames_out, data.shape[1]))\n if self._fs_out < self._fs_in or self._window > 1:\n for i in range(frames_out):\n t = float(i) / self._fs_out # center of window in seconds\n t1 = int(max(0, round(self._fs_in * (t - half_window_size)) + tau))\n t2 = int(min(frames_in,\n round(self._fs_in * (t + half_window_size)) + tau))\n data_out[i, :] = np.mean(data[t1:t2, :], axis=0)\n\n else:\n\n data_out = data\n\n return data_out",
"def transcribe(self, paths2audio_files: List[str], batch_size: int = 4) -> List[str]:\n pass",
"def resample(data, resample_rate=16000):\n for sample in data:\n assert \"sample_rate\" in sample\n assert \"wav\" in sample\n sample_rate = sample[\"sample_rate\"]\n waveform = sample[\"wav\"]\n if sample_rate != resample_rate:\n sample[\"sample_rate\"] = resample_rate\n sample[\"wav\"] = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=resample_rate)(waveform)\n yield sample",
"def load_audio(path, target_fs=None):\n y, fs = sf.read(path)\n if y.ndim>1:\n y = np.mean(y, axis=1)\n if target_fs is not None and fs!=target_fs:\n #print('Resampling %d->%d...' %(fs, target_fs))\n y = librosa.resample(y, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return y, fs",
"def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])",
"def data_resampler (self, data):\r\n data = librosa.resample(data, orig_sr = original_sampling, target_sr = self.target_sampling)\r\n \r\n return data",
"def resample(session_dir, normalized_dir, seed):\n\n host_name = socket.gethostname()\n\n # subject folder\n all_sub_folders = [ thisfile for thisfile in os.listdir(session_dir)]\n all_sub_folders.sort()\n proc_set = set() \n\n\n for sub_name in all_sub_folders:\n proc_set.add(subprocess.Popen(['/home/sci/weiliu/projects/group_mrf_lemon/build_' + host_name + '/fmriresample', '-i', os.path.join(session_dir, sub_name, 'func/lfo_res2standard.nii.gz'), '-o', os.path.join(normalized_dir, sub_name + '.nii.gz'), '-k', '197', '--seed', str(seed) ]) )\n\n # wait until all fmriresample finishes\n for p in proc_set:\n if p.poll() is None:\n p.wait()\n\n\n # Normalizatoin to sphere.\n all_subjects_files = os.listdir(normalized_dir)\n proc_set.clear()\n for sub_file in all_subjects_files:\n proc_set.add(subprocess.Popen(['/home/sci/weiliu/projects/group_mrf/build_' +host_name + '/projectfmri', '-i', os.path.join(normalized_dir, sub_file), '-o', os.path.join(normalized_dir, sub_file)]) )\n\n # wait until all fmriresample finishes\n for p in proc_set:\n if p.poll() is None:\n p.wait()",
"def sirsam_resample(data_sirsam):\n return os.path.join(data_sirsam, 'resampling')",
"def resample_signal(s, sample_rate, desired_sample_rate):\n\n duration = float(len(s)) / sample_rate\n t = np.arange(len(s)) * (1.0 / sample_rate)\n desired_n = int(duration*desired_sample_rate)\n rs,t_rs = resample(s, desired_n, t=t)\n return t_rs,rs",
"def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna 
be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates",
"def transform_folder(source_folder,\n output_folder,\n temp_folder,\n rate_limit=6000.0,\n overwrite=True,\n plot=False,\n image_folder=None,\n multiprocess=False,\n encoder='mpg123',\n step=5.0):\n merged_file = os.path.join(output_folder, 'merged_file.json')\n\n os.makedirs(temp_folder, exist_ok=True)\n os.makedirs(output_folder, exist_ok=True)\n if os.path.isfile(merged_file):\n os.remove(merged_file)\n if plot:\n os.makedirs(image_folder, exist_ok=True)\n\n # Check if mp3 is already transformed into wav. Right\n # now, foucluster doesn't have a direct read from mp3\n logger.info('Checking if songs are in WAV format...')\n if source_folder != temp_folder:\n [check_wav(song=song,\n source_folder=source_folder,\n temp_folder=temp_folder,\n encoder=encoder)\n for song in os.listdir(source_folder)]\n\n if multiprocess is True:\n logger.debug('Fourier is applied in multiprocess')\n songs = [(song, temp_folder, output_folder, rate_limit,\n overwrite, plot, image_folder, step)\n for song in os.listdir(source_folder)]\n\n # with mp.Pool(processes=max(int(mp.cpu_count() / 2.0), 1)) as p:\n with mp.Pool(processes=mp.cpu_count(), maxtasksperchild=1) as p:\n p.starmap(time_to_frequency, songs)\n else:\n logger.debug('Fourier is applied in single core')\n [time_to_frequency(song=song,\n temp_folder=temp_folder,\n output_folder=output_folder,\n rate_limit=rate_limit,\n overwrite=overwrite,\n plot=plot,\n image_folder=image_folder,\n step=step)\n for song in os.listdir(source_folder)]\n\n # read_files = glob.glob(os.path.join(output_folder, '*.json'))\n # with open(merged_file, 'w') as outfile:\n # file_contents = [open(f).read() for f in read_files]\n # outfile.write('[{}]'.format(','.join(file_contents)))",
"def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)",
"def convert_files_parallel(self) -> None:\n file_paths = []\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n file_paths.append(os.path.join(\n self.audios_dir, file))\n with Pool(cpu_count()) as p:\n p.map(self.convert_file, file_paths)",
"def transform_audio(self, segment: Union[AudioSegment, SpeechSegment]) -> None:\n segment.resample(self._new_sample_rate)",
"def filesample(filename):\n sampling_rate, samples = wavfile.read(filename)\n times = np.arange(len(samples)) / sampling_rate\n return samples, sampling_rate",
"def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")",
"def strech_audio(file_path, stretch_duration=\"00:10:00\"):\n audio = AudioSegment.from_mp3(file_path)\n # Pull thumbnail\n tags = ID3(file_path)\n thumbnail = tags.get(\"APIC:\").data\n\n # Pull any other tags from og audio file\n tags = mediainfo(file_path).get('TAG', {})\n \n # 1. Get the length of audio in seconds\n original_duration = len(audio)\n\n # 2. How many times does stretch_duration\n # overlap original duration\n stretch_duration = timestamp_to_milliseconds(stretch_duration)\n multiplier = int(stretch_duration/original_duration)\n\n # 3. Stretch the audio\n stretched_audio = audio*multiplier\n \n stretched_audio.export(\n file_path,\n format=\"mp3\",\n tags=tags\n )\n\n audiofile = eyed3.load(file_path)\n audiofile.tag.images.set(3, thumbnail, 'image/jpeg')\n audiofile.tag.save()",
"def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15),\n gain_range=(-6, 8)):\n low_tempo, high_tempo = tempo_range\n tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)\n low_gain, high_gain = gain_range\n gain_value = np.random.uniform(low=low_gain, high=high_gain)\n audio = augment_audio_with_sox(path=path, sample_rate=sample_rate,\n tempo=tempo_value, gain=gain_value)\n return audio",
"def convert_wav(src_wav, dst_wav, subtype='PCM_16'):\n assert os.path.exists(src_wav), \"{} not exists!\".format(src_wav)\n data, sr = soundfile.read(src_wav)\n soundfile.write(dst_wav, data, sr, subtype=subtype)",
"def file_generator(files: list,\n segment_duration: float,\n sampleRate: int,\n db_thr: float or None = None,\n frame_length: int = 512,\n hop_length: int = 128,\n ) -> None:\n\n I = 0\n J = 0\n\n segment = np.zeros((int(segment_duration*sampleRate),))\n\n k = 0\n file_no = 0\n\n while True:\n if I >= len(segment):\n yield segment\n segment = np.zeros((int(segment_duration*sampleRate),))\n I = 0\n\n if k == 0 or J >= len(y):\n J = 0\n y, sr = librosa.core.load(files[file_no], mono=True, sr=sampleRate)\n file_no += 1\n\n if file_no == len(files):\n break\n\n # Normalize\n y = y/y.max()\n\n # Remix non-silent segments\n if db_thr is not None:\n # Figure out intervals of non-silence (NOTE: Is the threshold right? -- 60db quiet)\n intervals = librosa.effects.split(y, frame_length=frame_length, hop_length=hop_length, top_db=db_thr)\n\n # Remix according to those intervals\n y = librosa.effects.remix(y, intervals)\n\n if len(segment[I:]) >= len(y[J:]):\n segment[I:I+len(y[J:])] = y[J:]\n I = I + len(y[J:])\n J = J + len(y[J:])\n else:\n segment[I:] = y[J:J+len(segment[I:])]\n J = J + len(segment[I:])\n I = I + len(segment[I:])\n k += 1",
"def resample(signal, sampleRate, newSampleRate):\r\n\r\n\tnbSamples = int(newSampleRate*signal.size/sampleRate)\r\n\tsignal = sp.signal.resample( signal, nbSamples)\r\n\tsignal = np.int16(signal)\r\n\treturn signal",
"def test_audio_to_target_dataset(self):\n # Data setup\n random_seed = 42\n sample_rate = 16000\n num_examples = 25\n data_num_channels = {\n 'input_signal': 4,\n 'target_signal': 2,\n }\n data_min_duration = 2.0\n data_max_duration = 8.0\n data_key = {\n 'input_signal': 'input_filepath',\n 'target_signal': 'target_filepath',\n }\n\n # Tolerance\n atol = 1e-6\n\n # Generate random signals\n _rng = np.random.default_rng(seed=random_seed)\n\n # Input and target signals have the same duration\n data_duration = np.round(_rng.uniform(low=data_min_duration, high=data_max_duration, size=num_examples), 3)\n data_duration_samples = np.floor(data_duration * sample_rate).astype(int)\n\n data = dict()\n for signal, num_channels in data_num_channels.items():\n data[signal] = []\n for n in range(num_examples):\n if num_channels == 1:\n random_signal = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples[n]))\n else:\n random_signal = _rng.uniform(low=-0.5, high=0.5, size=(num_channels, data_duration_samples[n]))\n data[signal].append(random_signal)\n\n with tempfile.TemporaryDirectory() as test_dir:\n\n # Build metadata for manifest\n metadata = []\n\n for n in range(num_examples):\n\n meta = dict()\n\n for signal in data:\n # filenames\n signal_filename = f'{signal}_{n:02d}.wav'\n\n # write audio files\n sf.write(os.path.join(test_dir, signal_filename), data[signal][n].T, sample_rate, 'float')\n\n # update metadata\n meta[data_key[signal]] = signal_filename\n\n meta['duration'] = data_duration[n]\n metadata.append(meta)\n\n # Save manifest\n manifest_filepath = os.path.join(test_dir, 'manifest.json')\n write_manifest(manifest_filepath, metadata)\n\n # Test 1\n # - No constraints on channels or duration\n dataset = AudioToTargetDataset(\n manifest_filepath=manifest_filepath,\n input_key=data_key['input_signal'],\n target_key=data_key['target_signal'],\n sample_rate=sample_rate,\n )\n\n # Also test the corresponding factory\n config = {\n 'manifest_filepath': manifest_filepath,\n 'input_key': data_key['input_signal'],\n 'target_key': data_key['target_signal'],\n 'sample_rate': sample_rate,\n }\n dataset_factory = audio_to_audio_dataset.get_audio_to_target_dataset(config)\n\n # Test number of channels\n for signal in data:\n assert data_num_channels[signal] == dataset.num_channels(\n signal\n ), f'Num channels not correct for signal {signal}'\n assert data_num_channels[signal] == dataset_factory.num_channels(\n signal\n ), f'Num channels not correct for signal {signal}'\n\n # Test returned examples\n for n in range(num_examples):\n item = dataset.__getitem__(n)\n item_factory = dataset_factory.__getitem__(n)\n\n for signal in data:\n item_signal = item[signal].cpu().detach().numpy()\n golden_signal = data[signal][n]\n assert (\n item_signal.shape == golden_signal.shape\n ), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'\n assert np.allclose(\n item_signal, golden_signal, atol=atol\n ), f'Test 1: Failed for example {n}, signal {signal} (random seed {random_seed})'\n\n item_factory_signal = item_factory[signal].cpu().detach().numpy()\n assert np.allclose(\n item_factory_signal, golden_signal, atol=atol\n ), f'Test 1: Failed for factory example {n}, signal {signal} (random seed {random_seed})'\n\n # Test 2\n # - Filtering based on signal duration\n min_duration = 3.5\n max_duration = 7.5\n\n dataset = AudioToTargetDataset(\n manifest_filepath=manifest_filepath,\n input_key=data_key['input_signal'],\n target_key=data_key['target_signal'],\n 
min_duration=min_duration,\n max_duration=max_duration,\n sample_rate=sample_rate,\n )\n\n filtered_examples = [n for n, val in enumerate(data_duration) if min_duration <= val <= max_duration]\n\n for n in range(len(dataset)):\n item = dataset.__getitem__(n)\n\n for signal in data:\n item_signal = item[signal].cpu().detach().numpy()\n golden_signal = data[signal][filtered_examples[n]]\n assert (\n item_signal.shape == golden_signal.shape\n ), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'\n assert np.allclose(\n item_signal, golden_signal, atol=atol\n ), f'Test 2: Failed for example {n}, signal {signal} (random seed {random_seed})'\n\n # Test 3\n # - Use channel selector\n channel_selector = {\n 'input_signal': [0, 2],\n 'target_signal': 1,\n }\n\n dataset = AudioToTargetDataset(\n manifest_filepath=manifest_filepath,\n input_key=data_key['input_signal'],\n target_key=data_key['target_signal'],\n input_channel_selector=channel_selector['input_signal'],\n target_channel_selector=channel_selector['target_signal'],\n sample_rate=sample_rate,\n )\n\n for n in range(len(dataset)):\n item = dataset.__getitem__(n)\n\n for signal in data:\n cs = channel_selector[signal]\n item_signal = item[signal].cpu().detach().numpy()\n golden_signal = data[signal][n][cs, ...]\n assert (\n item_signal.shape == golden_signal.shape\n ), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'\n assert np.allclose(\n item_signal, golden_signal, atol=atol\n ), f'Test 3: Failed for example {n}, signal {signal} (random seed {random_seed})'\n\n # Test 4\n # - Use fixed duration (random segment selection)\n audio_duration = 4.0\n audio_duration_samples = int(np.floor(audio_duration * sample_rate))\n\n filtered_examples = [n for n, val in enumerate(data_duration) if val >= audio_duration]\n\n for random_offset in [True, False]:\n # Test subsegments with the default fixed offset and a random offset\n\n dataset = AudioToTargetDataset(\n manifest_filepath=manifest_filepath,\n input_key=data_key['input_signal'],\n target_key=data_key['target_signal'],\n sample_rate=sample_rate,\n min_duration=audio_duration,\n audio_duration=audio_duration,\n random_offset=random_offset, # random offset when selecting subsegment\n )\n\n for n in range(len(dataset)):\n item = dataset.__getitem__(n)\n\n golden_start = golden_end = None\n for signal in data:\n item_signal = item[signal].cpu().detach().numpy()\n full_golden_signal = data[signal][filtered_examples[n]]\n\n # Find random segment using correlation on the first channel\n # of the first signal, and then use it fixed for other signals\n if golden_start is None:\n golden_start = get_segment_start(\n signal=full_golden_signal[0, :], segment=item_signal[0, :]\n )\n if not random_offset:\n assert (\n golden_start == 0\n ), f'Expecting the signal to start at 0 when random_offset is False'\n\n golden_end = golden_start + audio_duration_samples\n golden_signal = full_golden_signal[..., golden_start:golden_end]\n\n # Test length is correct\n assert (\n item_signal.shape[-1] == audio_duration_samples\n ), f'Test 4: Signal length ({item_signal.shape[-1]}) not matching the expected length ({audio_duration_samples})'\n\n assert (\n item_signal.shape == golden_signal.shape\n ), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'\n # Test signal values\n assert np.allclose(\n item_signal, golden_signal, atol=atol\n ), f'Test 4: Failed for example {n}, 
signal {signal} (random seed {random_seed})'\n\n # Test 5:\n # - Test collate_fn\n batch_size = 16\n batch = [dataset.__getitem__(n) for n in range(batch_size)]\n batched = dataset.collate_fn(batch)\n\n for n, signal in enumerate(data.keys()):\n signal_shape = batched[2 * n].shape\n signal_len = batched[2 * n + 1]\n\n assert signal_shape == (\n batch_size,\n data_num_channels[signal],\n audio_duration_samples,\n ), f'Test 5: Unexpected signal {signal} shape {signal_shape}'\n assert len(signal_len) == batch_size, f'Test 5: Unexpected length of signal_len ({len(signal_len)})'\n assert all(signal_len == audio_duration_samples), f'Test 5: Unexpected signal_len {signal_len}'",
"def postprocess(self,\n wav,\n original_fs: int,\n target_fs: int=0,\n volume: float=1.0,\n speed: float=1.0,\n audio_path: str=None):\n\n # transform sample_rate\n if target_fs == 0 or target_fs > original_fs:\n target_fs = original_fs\n wav_tar_fs = wav\n logger.debug(\n \"The sample rate of synthesized audio is the same as model, which is {}Hz\".\n format(original_fs))\n else:\n wav_tar_fs = librosa.resample(\n np.squeeze(wav), original_fs, target_fs)\n logger.debug(\n \"The sample rate of model is {}Hz and the target sample rate is {}Hz. Converting the sample rate of the synthesized audio successfully.\".\n format(original_fs, target_fs))\n # transform volume\n wav_vol = wav_tar_fs * volume\n logger.debug(\"Transform the volume of the audio successfully.\")\n\n # transform speed\n try: # windows not support soxbindings\n wav_speed = change_speed(wav_vol, speed, target_fs)\n logger.debug(\"Transform the speed of the audio successfully.\")\n except ServerBaseException:\n raise ServerBaseException(\n ErrorCode.SERVER_INTERNAL_ERR,\n \"Failed to transform speed. Can not install soxbindings on your system. \\\n You need to set speed value 1.0.\")\n sys.exit(-1)\n except Exception as e:\n logger.error(\"Failed to transform speed.\")\n logger.error(e)\n sys.exit(-1)\n\n # wav to base64\n buf = io.BytesIO()\n sf.write(buf, wav_speed, target_fs, format=\"wav\")\n buf.seek(0)\n\n base64_bytes = base64.b64encode(buf.read())\n wav_base64 = base64_bytes.decode('utf-8')\n logger.debug(\"Audio to string successfully.\")\n\n # save audio\n if audio_path is not None:\n if audio_path.endswith(\".wav\"):\n sf.write(audio_path, wav_speed, target_fs)\n elif audio_path.endswith(\".pcm\"):\n wav_norm = wav_speed * (32767 / max(0.001,\n np.max(np.abs(wav_speed))))\n with open(audio_path, \"wb\") as f:\n f.write(wav_norm.astype(np.int16))\n logger.info(\"Save audio to {} successfully.\".format(audio_path))\n else:\n logger.info(\"There is no need to save audio.\")\n\n return target_fs, wav_base64",
"def sirsam_rs_out(sirsam_resample, sirsam_target_path):\n return os.path.join(sirsam_resample, 'out', \n os.path.splitext(os.path.basename(sirsam_target_path))[0] + '_resampled')",
"def convert_files_sequential(self) -> None:\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n self.convert_file(os.path.join(\n self.audios_dir, file), self.output_format)",
"def apply_fourier_transform(chunked_audio):\n pass",
"def decode_audio(in_file, out_file):\r\n # construct the decoder\r\n autoencoder = keras.models.load_model(\"audio_autoencoder.model\")\r\n in_layer = keras.layers.Input(shape=(13,))\r\n decode = autoencoder.layers[-13](in_layer)\r\n decode = autoencoder.layers[-12](decode)\r\n decode = autoencoder.layers[-11](decode)\r\n decode = autoencoder.layers[-10](decode)\r\n decode = autoencoder.layers[-9](decode)\r\n decode = autoencoder.layers[-8](decode)\r\n decode = autoencoder.layers[-7](decode)\r\n decode = autoencoder.layers[-6](decode)\r\n decode = autoencoder.layers[-5](decode)\r\n decode = autoencoder.layers[-4](decode)\r\n decode = autoencoder.layers[-3](decode)\r\n decode = autoencoder.layers[-2](decode)\r\n decode = autoencoder.layers[-1](decode)\r\n decoder = keras.models.Model(in_layer, decode)\r\n\r\n # Load the data\r\n ins = np.load(in_file + \".npz\")\r\n encoded = ins['data']\r\n samp_rate = ins['rate']\r\n channels = ins['channels']\r\n\r\n # Run the decoder\r\n outputs = decoder.predict(encoded)\r\n\r\n # reform output data to the original shape and range\r\n out = outputs.reshape(outputs.shape[0] * outputs.shape[1])\r\n out = ((out * 2.0) - 1.0) * float(pow(2, 15))\r\n out = np.rint(out).astype(np.int16)\r\n\r\n if channels == 2:\r\n out = out.reshape(len(out)//2, 2)\r\n out1 = out[:, 0]\r\n out2 = out[:, 1]\r\n\r\n # perform stft on output data to be in frequency domain\r\n frequencies, times, spectrogram1 = signal.stft(out1, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n frequencies, times, spectrogram2 = signal.stft(out2, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n # eliminate values with frequencies higher than 1680 HZ to decrease noise\r\n spectrogram1[40:, :] = 0\r\n spectrogram2[40:, :] = 0\r\n # perform inverse stft to get back data in time domain\r\n _, out1 = signal.istft(spectrogram1, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n _, out2 = signal.istft(spectrogram2, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n out1 = np.rint(out1).astype(np.int16)\r\n out2 = np.rint(out2).astype(np.int16)\r\n out1 = out1.reshape(out1.shape[0], 1)\r\n out2 = out2.reshape(out2.shape[0], 1)\r\n out = np.concatenate((out1, out2), axis=1)\r\n elif channels == 1:\r\n # perform stft on output data to be in frequency domain\r\n frequencies, times, spectrogram = signal.stft(out, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n # eliminate values with frequencies higher than 1680 HZ to decrease noise\r\n spectrogram[40:, :] = 0\r\n # perform inverse stft to get back data in time domain\r\n _, out = signal.istft(spectrogram, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n out = np.rint(out).astype(np.int16)\r\n\r\n # build the wav file\r\n wavfile.write(out_file+'.wav', samp_rate, out)",
"def load_wav_16k_mono(self, filename):\n filename = utils.get_file_path('webapp/static/processed', filename)\n\n file_contents = tf.io.read_file(filename)\n wav, sample_rate = tf.audio.decode_wav(file_contents,\n desired_channels=1)\n wav = tf.squeeze(wav, axis=-1)\n sample_rate = tf.cast(sample_rate, dtype=tf.int64)\n wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)\n return wav",
"def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray],\n source_sr: Optional[int] = None):\n # Load the wav from disk if needed\n if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path):\n wav, source_sr = librosa.load(fpath_or_wav, sr=None)\n else:\n wav = fpath_or_wav\n\n # Resample the wav if needed\n if source_sr is not None and source_sr != sampling_rate:\n wav = librosa.resample(wav, source_sr, sampling_rate)\n\n # Apply the preprocessing: normalize volume and shorten long silences\n wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True)\n\n return wav",
"def subsample():\n\n nwav = 872\n nrow = 1600\n ncol = 1560\n\n fpath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_binned/nrow1600')\n fnames = ['full_frame_20ms_faster_VNIR_1600.raw',\n 'full_frame_20ms_faster_VNIR_1600_flat.raw']\n\n for fname in fnames:\n print(\"SUBSAMPLE: reading data from {0}\".format(fpath))\n print(\"SUBSAMPLE: {0}\".format(fname))\n data = np.fromfile(os.path.join(fpath,fname)).reshape(nwav,nrow,ncol)\n\n for fac in [2,4,8]:\n trow = '{0:04}'.format(1600/fac)\n opath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_subsample',\n 'nrow'+trow)\n oname = fname.replace('1600',trow)\n\n print(\"SUBSAMPLE: writing subsampled data to {0}\".format(opath))\n print(\"SUBSAMPLE: {0}\".format(oname))\n data[:,::fac,::fac].tofile(open(os.path.join(opath,oname),'wb'))\n\n return"
] | [
"0.6366487",
"0.63232714",
"0.6174829",
"0.61593074",
"0.60996795",
"0.608292",
"0.6074825",
"0.60149586",
"0.5962723",
"0.59391665",
"0.5921867",
"0.58278805",
"0.5824111",
"0.5806314",
"0.56739146",
"0.56591153",
"0.56460583",
"0.5642763",
"0.56418234",
"0.56304383",
"0.5623994",
"0.56222355",
"0.56021106",
"0.5594628",
"0.559027",
"0.5544189",
"0.5542384",
"0.55361533",
"0.5523502",
"0.5521056"
] | 0.76220435 | 0 |
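The per-file step inside resample_hear_corpus reduces to load, resample, save. A minimal single-file sketch with torchaudio; "in.wav" and "out.wav" are placeholder paths:

    import torchaudio

    def resample_file(src_path: str, dst_path: str, target_sr: int = 16000) -> None:
        wav, sr = torchaudio.load(src_path)                       # wav shape: (channels, samples)
        if sr != target_sr:
            wav = torchaudio.transforms.Resample(sr, target_sr)(wav)
        torchaudio.save(dst_path, wav, sample_rate=target_sr)

    resample_file("in.wav", "out.wav", target_sr=16000)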
Reads an option from input, chosen from a list of options, with a message, a default option, and a help message. | def read_option(message, options=[], default=None, help=None):
while True:
option = raw_input(message)
if option:
if option == "?":
if help:
print(help)
print("Possible options: {}".format(options))
elif options and option in options:
return option
elif not options:
return option
else:
print("Unknown option. Options are: {}".format(options))
elif not option:
if default:
return default
else:
print("Please specify an option") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_input_choices(self, msg, *options):\n choices = ['%s %s' % (self.prefix, msg)]\n choices += [\n \"%s. %s\" % (num, opt) for num, opt in enumerate(options, 1)]\n try:\n input_str = int(\n vim.eval('inputlist(%s)' % self.prepare_value(choices)))\n except (KeyboardInterrupt, ValueError):\n input_str = 0\n\n if not input_str:\n self.message('Cancelled!')\n return False\n\n try:\n return options[input_str - 1]\n except (IndexError, ValueError):\n self.error('Invalid option: %s' % input_str)\n return self.user_input_choices(msg, *options)",
"def read_element():\r\n option = 0\r\n while option != len(options) - 1:\r\n show_menu()\r\n input_data = _get_client_field('Options: ')\r\n if not __empty(input_data) and (input_data.isdigit() == True):\r\n option = int(input_data)\r\n switch(option)",
"def _get_input(prompt, options, allow_new=False, reprompt_options=None):\n\n _lwr_opts = [x.lower() for x in options]\n if reprompt_options is None:\n reprompt_options = options\n\n while True:\n _resp = input(prompt).strip()\n\n # Check that input is one of the options\n try:\n i = _lwr_opts.index(_resp.lower())\n return options[i]\n except ValueError:\n if not allow_new:\n print(f'Response must be one of the following: {\", \".join(reprompt_options)}')\n\n if allow_new and _resp: # If have a non-empty string\n return _resp",
"def read_opts(self):\n\n # process any optlist_ options\n self.valid_opts.check_special_opts(sys.argv)\n\n # ------------------------------------------------------------\n # terminal arguments, first\n\n # cannot have len(argv) <= 1 here, but be consistent with other progs\n if len(sys.argv) <= 1 or '-help' in sys.argv:\n print g_help_string\n return 0\n\n if '-hist' in sys.argv:\n print g_history\n return 0\n\n if '-ver' in sys.argv:\n print g_version\n return 0\n\n if '-show_valid_opts' in sys.argv:\n self.valid_opts.show('', 1)\n return 0\n\n # ------------------------------------------------------------\n # read all user options\n\n self.user_opts = self.OL.read_options(sys.argv, self.valid_opts)\n if not self.user_opts: return 1 # error condition\n\n return None # normal completion",
"def ReadOptions(self, args):\n (opts, args) = getopt.getopt(args, 'vxi:p:h:', ('help',))\n for (key, val) in opts:\n if key == '-h': self.hash = val\n elif key == '-i': self.input = val\n elif key == '-v':\n self.verbose = True\n util.verbose = True\n elif key == '-x':\n self.verbose = True\n util.verbose = True\n self.extra_verbose = True\n util.extra_verbose = True\n elif key == '-p': self.profile_dest = val\n elif key == '--help':\n PrintUsage()\n sys.exit(0)\n\n if not self.input:\n if 'GRIT_INPUT' in os.environ:\n self.input = os.environ['GRIT_INPUT']\n else:\n self.input = 'resource.grd'\n\n return args",
"def user_input(self, options, prompt):\n for o in options:\n line = self.selector_line(o)\n o[\"line\"] = line\n self.output(line)\n self.output(prompt, end=\" \")\n while True:\n if self.test_input:\n inp = self.test_input.pop(0)\n self.output(f\"Using '{inp}' test input\")\n else:\n try:\n inp = raw_input()\n except (IOError, KeyboardInterrupt):\n self.game.print_state()\n raise\n if inp:\n matching = []\n for o in options:\n if o[\"selector\"] == inp:\n return o\n if inp.lower() in o[\"line\"].lower() and o[\"selector\"] != \"-\":\n matching.append(o)\n if len(matching) == 1:\n return matching[0]\n self.output(f\"Invalid Option ({inp})\")",
"def ParseOption():\n parser = optparse.OptionParser()\n parser.add_option('--input', dest='input', help='Input file path')\n parser.add_option('--output', dest='output', help='Output file path')\n parser.add_option(\n '--var_name', dest='var_name', help='Var name for the array')\n return parser.parse_args()[0]",
"def select(self, options, prompt='Your choice? '):\n local_opts = options\n if isinstance(options, string_types):\n local_opts = list(zip(options.split(), options.split()))\n fulloptions = []\n for opt in local_opts:\n if isinstance(opt, string_types):\n fulloptions.append((opt, opt))\n else:\n try:\n fulloptions.append((opt[0], opt[1]))\n except IndexError:\n fulloptions.append((opt[0], opt[0]))\n for (idx, (value, text)) in enumerate(fulloptions):\n self.poutput(' %2d. %s\\n' % (idx + 1, text))\n while True:\n response = sm.input(prompt)\n try:\n response = int(response)\n result = fulloptions[response - 1][0]\n break\n except (ValueError, IndexError):\n self.stdout.write(\"{!r} isn't a valid choice. Pick a number \"\n \"between 1 and {}:\\n\".format(\n response, len(fulloptions)))\n return result",
"def choice(\n\t\toptions: Union[List[str], Mapping[str, str]],\n\t\ttext: str = '',\n\t\tdefault: Optional[str] = None,\n\t\tprompt_suffix: str = \": \",\n\t\tshow_default: bool = True,\n\t\terr: bool = False,\n\t\tstart_index: int = 0\n\t\t) -> Union[str, int]:\n\n\t# TODO: completer for numbers?\n\n\ttype_: click.ParamType\n\n\tif isinstance(options, Mapping):\n\t\t# (Y/I/N/O/D/Z) [default=N]\n\n\t\ttext = f\"{text} ({'/'.join(options.keys())})\"\n\t\ttype_ = click.STRING\n\n\t\tfor choice, descripton in options.items():\n\t\t\tclick.echo(f\" {choice} : {descripton}\")\n\n\telse:\n\t\ttype_ = click.IntRange(start_index, len(options) + 1 - start_index)\n\n\t\tfor idx, descripton in enumerate(options):\n\t\t\tidx += start_index\n\t\t\tclick.echo(f\" [{idx}] {descripton}\")\n\n\tif default is not None and show_default:\n\t\ttext += f\" [default={default}]\"\n\n\twhile True:\n\t\tselection = prompt(\n\t\t\t\ttext=text,\n\t\t\t\tdefault=default,\n\t\t\t\ttype=type_,\n\t\t\t\tprompt_suffix=prompt_suffix,\n\t\t\t\tshow_default=False,\n\t\t\t\terr=err,\n\t\t\t\t)\n\t\tif isinstance(options, Mapping):\n\t\t\tselection = selection.strip().upper()\n\t\t\tif selection not in options:\n\t\t\t\tclick.echo(f\"Please enter a valid option.\")\n\t\t\telse:\n\t\t\t\treturn selection\n\t\telse:\n\t\t\treturn selection - start_index",
"def get_option(self):\n self.display_options()\n i = input('\\nOption: ')\n\n return [i]",
"def prompt_options_list(options=None,\n default=None,\n prompt=\"Select from the following options\"):\n if 'Bullet' not in globals():\n raise RuntimeError(\"[-] can't use Bullet on Windows\")\n if (\n len(options) == 0\n or not isinstance(options[0], str)\n ):\n raise RuntimeError('[-] a list of options is required')\n cancel = '<CANCEL>'\n if default is None:\n default = cancel\n else:\n # Remove the default from the list because it will\n # be added back as the first item.\n options = [i for i in options if i != default]\n choices = [default] + options\n cli = Bullet(prompt=f'\\n{prompt}',\n choices=choices,\n indent=0,\n align=2,\n margin=1,\n shift=0,\n bullet=\"→\",\n pad_right=5)\n choice = cli.launch()\n if default == cancel and choice == cancel:\n logger.info('[-] cancelled selection of choice')\n return None\n return choice",
"def _maybe_read_option(self):\n self._read_while(self._is_whitespace)\n\n # check if the next word is maybe an option\n if not self._is_id_start(self._peek_char()):\n self.state += 1\n return\n\n # option follows the same naming convention as ID\n opt = self._read_while(self._is_id)\n if opt != '':\n self.parsed_parameter = self.parsed_id\n self.parsed_option = opt\n self.parsed_id += ':' + self.parsed_option\n\n self._read_while(self._is_whitespace)\n self.state += 1",
"def ask(question, options, default):\n assert default in options\n\n question += \" ({})? \".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected",
"def parse_options():\n\n parser = optparse.OptionParser(usage=USAGE, version=VERSION)\n\n parser.add_option(\"-f\", \"--file\",\n action=\"store\", default=Utils.getConfig(\"defaultFile\"), dest=\"file\",\n help=\"Read the site name from external file\")\n\n parser.add_option(\"-s\", \"--site-name\",\n action=\"store\", default=\"\", dest=\"sitename\",\n help=\"Get links for specified url only\")\n\n opts, args = parser.parse_args()\n\n return opts, args",
"def read_float(message, default=None, help=None):\n while True:\n option = raw_input(message)\n if not option:\n if default is not None:\n return default\n else:\n print(\"Please provide a value.\")\n elif option == \"?\":\n if help:\n print(help)\n else:\n print(\"Please provide a number\")\n else:\n try:\n return float(option)\n except ValueError:\n print(\"Expected a number.\")\n pass",
"def ask_options(prompt, options, confirm=True, title=True, default=None, hints=None):\n print(prompt + ':')\n for idx, option in enumerate(options):\n if title:\n option = str(option).title()\n if hints is None:\n print(f'{idx+1} - {option}')\n else:\n try:\n print(f'{idx+1} - {option}: {hints[idx]}')\n except IndexError:\n print(f'{idx+1} - {option}')\n if default is None:\n hint = f'Pick an option (1-{len(options)}): '\n else:\n hint = f'Pick an option (1-{len(options)}) [{options.index(default)+1}]: '\n option = input(hint)\n if option == '' and default is not None:\n return default\n try:\n option = int(option)\n try:\n if option < 1:\n raise IndexError\n option = options[option-1]\n if confirm:\n print(f'User selected: {option}')\n return option\n except IndexError:\n print(f'Invalid option. Must be between 1 and {len(options)}')\n return ask_options(prompt, options, confirm, title, default)\n except ValueError:\n print('Invalid option. Must be integer.')\n return ask_options(prompt, options, confirm, title, default)",
"def inputChoice(self, question, options, hotkeys, default=None):\n options = options[:] # we don't want to edit the passed parameter\n for i in range(len(options)):\n option = options[i]\n hotkey = hotkeys[i]\n # try to mark a part of the option name as the hotkey\n m = re.search('[%s%s]' % (hotkey.lower(), hotkey.upper()), option)\n if hotkey == default:\n caseHotkey = hotkey.upper()\n else:\n caseHotkey = hotkey\n if m:\n pos = m.start()\n options[i] = '%s[%s]%s' % (option[:pos], caseHotkey,\n option[pos+1:])\n else:\n options[i] = '%s [%s]' % (option, caseHotkey)\n # loop until the user entered a valid choice\n while True:\n prompt = '%s (%s)' % (question, ', '.join(options))\n answer = self.input(prompt)\n if answer.lower() in hotkeys or answer.upper() in hotkeys:\n return answer\n elif default and answer=='': # empty string entered\n return default",
"def prompt_with_options(prompt, default=None, options=None):\n\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n if options and value not in options:\n value = None\n elif default is not None:\n value = default\n\n return value",
"def get_answer(option_list):\n # Print the options\n print(\"Options:\")\n for i in range(len(option_list)):\n print(f\"{i + 1}. {option_list[i]}\")\n\n # Return the selected option from the user\n while True:\n try:\n selection = int(input(\">>>\"))\n if 1 <= selection <= len(option_list):\n print()\n return selection\n else:\n raise ValueError\n except ValueError:\n print(f\"Invalid option: Must be a number between 1 and {len(option_list)}\")",
"def getopts():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", type=argparse.FileType('r'),\n required=True, help=\"input file (.csv)\")\n return parser.parse_args()",
"def ask_multiple_option(options, prefix = 'Choose between', prompt = ': '):\n\n def exists(index):\n return 0 <= index < len(options)\n\n while True:\n print(prefix)\n for index, option in enumerate(options):\n print(' {} - {}'.format(index + 1, option))\n answer = input(prompt).strip()\n if answer is not '':\n index = int(answer) - 1\n if exists(index):\n return options[index]",
"def option(number, default='no'):\n return answer(number).get('options', default)",
"def select_option(options, input, prompt='> ', error=\"Invalid selection\"):\n if isinstance(options, dict):\n ordered = list(\n sorted(\n options.items(),\n key=operator.itemgetter(1)\n )\n )\n else:\n ordered = options\n\n if input.enabled:\n for i, (key, value) in enumerate(ordered, start=1):\n print(' {i}) {label}'.format(i=i, label=value))\n\n print()\n\n choices = [str(index) for index in range(1, len(ordered) + 1)]\n index = input.selection_input(prompt=prompt, choices=choices, error_message=error)\n return ordered[int(index) - 1][0]",
"def getopt():\n raise NotImplementedError()",
"def readOption (self, optName) :\n if not optName in self.config:\n return None\n return self.config[optName]",
"def get_input(prompt, default=None, choices=None, option_value=None):\r\n if option_value is not None:\r\n return option_value\r\n \r\n choices = choices or []\r\n while 1:\r\n r = raw_input(prompt+' ').strip()\r\n if not r and default is not None:\r\n return default\r\n if choices:\r\n if r not in choices:\r\n r = None\r\n else:\r\n break\r\n else:\r\n break\r\n return r",
"def help_option(args, run):\n pass",
"def select(self, opts, prompt='Your choice? '):\n local_opts = opts\n if isinstance(opts, string_types):\n local_opts = list(zip(opts.split(), opts.split()))\n fulloptions = []\n for opt in local_opts:\n if isinstance(opt, string_types):\n fulloptions.append((opt, opt))\n else:\n try:\n fulloptions.append((opt[0], opt[1]))\n except IndexError:\n fulloptions.append((opt[0], opt[0]))\n for (idx, (value, text)) in enumerate(fulloptions):\n self.poutput(' %2d. %s\\n' % (idx + 1, text))\n while True:\n response = sm.input(prompt)\n try:\n response = int(response)\n result = fulloptions[response - 1][0]\n break\n except (ValueError, IndexError):\n self.poutput(\"{!r} isn't a valid choice. Pick a number between 1 and {}:\\n\".format(response,\n len(fulloptions)))\n return result",
"def _describe_input_option(self, option, **options):\n accept_value = option.accept_value()\n default = option.get_default()\n if accept_value and default is not None and (not isinstance(default, list) or len(default)):\n default = '<comment> [default: %s]</comment>' % self._format_default_value(default)\n else:\n default = ''\n\n value = ''\n if accept_value:\n value = '=%s' % option.get_name().upper()\n\n if option.is_value_optional():\n value = '[%s]' % value\n\n total_width = options.get('total_width', self._calculate_total_width_for_options([option]))\n shortcut = option.get_shortcut()\n synopsis = '%s%s'\\\n % ('-%s, ' % shortcut if shortcut else ' ',\n '--%s%s' % (option.get_name(), value))\n\n spacing_width = total_width - len(synopsis) + 2\n\n self._write_text(\n ' <info>%s</info>%s%s%s%s'\n % (\n synopsis,\n ' ' * spacing_width,\n re.sub('\\s*[\\r\\n]\\s*', '\\n' + (' ' * (total_width + 17)), option.get_description() or ''),\n default,\n '<comment> (multiple values allowed)</comment>' if option.is_list() else ''\n ),\n **options\n )",
"def handle_option(self, option, options):\n pass"
] | [
"0.64677876",
"0.6426644",
"0.6143929",
"0.6095286",
"0.6019173",
"0.5975163",
"0.5952457",
"0.59263563",
"0.5890711",
"0.5865748",
"0.5855209",
"0.584225",
"0.5817843",
"0.5789897",
"0.5786976",
"0.5770645",
"0.5755894",
"0.5755527",
"0.57514435",
"0.5748384",
"0.57481533",
"0.57221156",
"0.56875384",
"0.5679287",
"0.56675065",
"0.5662853",
"0.5640715",
"0.5611632",
"0.5601527",
"0.55953914"
] | 0.843761 | 0 |
Reads a float from input, with message, default option and help message. | def read_float(message, default=None, help=None):
while True:
option = raw_input(message)
if not option:
if default is not None:
return default
else:
print("Please provide a value.")
elif option == "?":
if help:
print(help)
else:
print("Please provide a number")
else:
try:
return float(option)
except ValueError:
print("Expected a number.")
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_float(self, prompt=\"> \"):\n\t\twhile True:\n\t\t\tans = raw_input(prompt)\n\t\t\ttry: \t\n\t\t\t\tans = float(ans)\n\t\t\t\treturn ans\n\t\t\texcept ValueError:\n\t\t\t\tif ans == \"quit\": quit()\n\t\t\t\telse: print \"Please enter a number using decimal notation.\"",
"def prompt_float_input(prompt_name: str, get_user_input: GetInputFunc) -> float:\n try:\n return float(get_user_input(f\"{prompt_name}:\"))\n except (ValueError, IndexError) as e:\n raise InvalidInput(str(e))",
"def getfloat(self, option, default=None):\n\t\treturn self._get_raw(option, 'float', default)",
"def main():\n\ttry:\n\t\tx = input(\"Type in a number to be converted to a float: \")\n\t\tfloatnum = float(x)\n\t\tprint(floatnum)\n\texcept(ValueError):\n\t\tprint(\"Could not convert the string to a float\")",
"def get_float(message, high, low=0):\r\n\r\n while True:\r\n try:\r\n floatValue = float(input(message))\r\n except ValueError:\r\n print (\"ERROR, Entry must be a number. Please try again.\")\r\n continue\r\n if floatValue <= low or floatValue > high:\r\n print (\"ERROR, Entry must be greater than \" + str(low) + \" and, less than or equal to \"\\\r\n + str(high) + \". Please try again.\")\r\n continue\r\n break\r\n return floatValue",
"def read_float(v):\n if v.strip() == '':\n return 0.\n try:\n return float(v)\n except ValueError:\n # ENDF6 may omit the e for exponent\n return float(v[0] + v[1:].replace('+', 'e+').replace('-', 'e-')) # don't replace leading negative sign",
"def getFloat(fl):\n while True:\n try:\n return float(input(fl))\n except ValueError:\n print(\"Give a proper value please\")",
"def getfloat(self, section, option, default=None):\r\n return self.get(section, option, type=float, default=default)",
"def getFloat(self, section, option, default=0):\n return self.get(section, option, default, float)",
"def usetf(self, prompt=None, default=None):\n\n i = 0\n abak = copy(default) # Backup our default value\n\n while(i<self.maxTries):\n tmp = self.uset(prompt,default)\n try:\n a = float(tmp)\n i = self.maxTries # preload failure\n except:\n # Print warning\n print\n print \" WARNING: Invalid Entry. Please enter a floating point number \"\n print \n # reload the default\n a = abak\n i = i+1\n\n return(a)",
"def readFloat(self) -> float:\n return self._unpack('!f', 4)",
"def possible_float(arg):\n try:\n return float(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as a float, treating it as a string')\n return arg",
"def getfloat(self, sec, name, default=None, badtypeok=False, morevars=None,\n taskvars=None):\n if sec in self.OLD_SECTIONS:\n sec = 'config'\n\n try:\n # call ProdConfig function with no default set so\n # we can log and set the default\n return super().getfloat(sec, name, default=None,\n badtypeok=badtypeok, morevars=morevars,\n taskvars=taskvars)\n\n # if config variable is not set\n except NoOptionError:\n if default is None:\n default = float(util.MISSING_DATA_VALUE)\n\n self.check_default(sec, name, default)\n return default\n\n # if invalid value\n except ValueError:\n # check if it was an empty string and return MISSING_DATA_VALUE\n if super().getstr(sec, name) == '':\n return util.MISSING_DATA_VALUE\n\n # if value is not correct type, log error and return None\n self.logger.error(f\"[{sec}] {name} must be a float.\")\n return None",
"def test_getfloat_with_default(self):\n self.assertEqual(self.config.getfloat('advanced','p'),None)\n self.assertEqual(self.config.getfloat('advanced','p',5.0),5.0)",
"def read_float(self):\n return self._packers[\"f\"].unpack(self.read(4))[0]",
"def char_float(inp_char):\n try:\n nFloat = float(inp_char)\n except:\n nFloat = 0.0\n return nFloat",
"def getfloat(self, option):\n return getfloat(self.name, option)",
"def getfloat(self, section, option):\n return float(self.get(section, option))",
"def read_float(data):\n s_type = \"=%s\" % get_type(\"float\")\n return struct.unpack(s_type, data.read(4))[0]",
"def read_float(self, process_handle: int, address: int):\n self.__bufferSize = 4\n value = self.__read_bytes(process_handle, address)\n return None if value is None else unpack('<f', bytearray(value))",
"def read_float(stream, size):\n\t\n\tif size not in (0, 4, 8):\n\t\traise IOError('Cannot read floating point values with lengths other than 0, 4, or 8 bytes.')\n\tvalue = 0.0\n\tif size in (4, 8):\n\t\tdata = stream.read(size)\n\t\tvalue = struct.unpack({\n\t\t\t4: '>f',\n\t\t\t8: '>d'\n\t\t}[size], data)[0]\n\treturn value",
"def val_parser(parser, inputstring):\n\n inputstring = inputstring.strip()\n\n if float(inputstring) == 9.9e37:\n output = float('inf')\n else:\n output = float(inputstring)\n if parser == int:\n output = parser(output)\n\n return output",
"def read_float(stream, writer_schema=None, reader_schema=None): # noqa\n return unpack('<f', stream.read(4))[0]",
"def set_float(val,default=None):\n if val in (None,''): return default\n try:\n return float(val)\n except:\n return default",
"def config_get_float(section, option):\n return __CONFIG.getfloat(section, option)",
"def value_input(unit):\n print(Fore.CYAN + \"\\n Enter the temperature in \\u00b0\" + unit + \":\\n\" +\n Fore.RESET)\n while True:\n try:\n value = float(input()) # <=== Make sure input is a float\n return value\n break\n except ValueError:\n print(Fore.RED + \"\\n Input must be an integer!\\n\" + Fore.RESET)",
"def get_user_input():\n return float(input('Your transaction amount please: '))",
"def parse_float(value):\n try:\n return float(value)\n except (ValueError, TypeError):\n return None",
"def mag_def(inp):\n global F0\n inp = inp.split()\n for i in range(2):\n try:\n inp[i] = float(inp[i])\n except ValueError:\n x = i\n \n m = inp[0]\n F = inp[1]\n \n if x==0:\n return -2.5 * math.log10(F/F0)\n elif x==1:\n f_vega = 10 ** ((-2)*m*(1/5))\n return f_vega * F0\n else:\n raise Exception(\"No unknowns!\")",
"def read_option(message, options=[], default=None, help=None):\n while True:\n option = raw_input(message)\n if option:\n if option == \"?\":\n if help:\n print(help)\n print(\"Possible options: {}\".format(options))\n elif options and option in options:\n return option\n elif not options:\n return option\n else:\n print(\"Unknown option. Options are: {}\".format(options))\n elif not option:\n if default:\n return default\n else:\n print(\"Please specify an option\")"
] | [
"0.7123461",
"0.6892407",
"0.6582036",
"0.6527858",
"0.63909686",
"0.63844645",
"0.6292312",
"0.62777096",
"0.6268448",
"0.61232615",
"0.6026712",
"0.59781504",
"0.59341896",
"0.5892403",
"0.58726865",
"0.58607256",
"0.584479",
"0.5762863",
"0.5747006",
"0.5698778",
"0.56860435",
"0.56836265",
"0.5653579",
"0.5644199",
"0.5636222",
"0.56134737",
"0.5602303",
"0.55994266",
"0.5566559",
"0.5565233"
] | 0.8742288 | 0 |
Writes the dict in self.data to the YAML file. | def write(self):
self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self, file=sys.stdout):\n d = self.to_dict()\n if d:\n yaml.dump([d], file, default_flow_style=False)",
"def save(self, filename=None):\n name = filename or self.filename\n with open(name, \"w\") as stream:\n yaml.dump(self.data, stream, default_flow_style=False)",
"def _save_data_yaml(self, data, pathname): \n pathname = self._yaml_extension(pathname)\n with open(pathname, \"w\") as outfile:\n yaml.dump(data, outfile, default_flow_style=False)",
"def _save_data_yaml(self, data, pathname): \n pathname = self._yaml_extension(pathname)\n with open(pathname, \"w\") as outfile:\n yaml.dump(data, outfile, default_flow_style=False)",
"def save(self, filename):\n with open(filename, 'w') as f:\n yaml.dump(self.to_dict(), f, sort_keys=False)",
"def _write(self, widget_dict):\n return yaml.safe_dump(widget_dict, default_flow_style=False)",
"def _save_config_log(self, data):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n with open(config_path, 'w') as f:\n yaml.safe_dump(data, f, default_flow_style=False)",
"def dump_yaml(self, data, output):\n yaml.indent(mapping=MAPPING, sequence=SEQUENCE, offset=OFFSET)\n yaml.dump(data, output)",
"def write(self, fname=None):\n fname = fname or self.path\n with open(fname, \"w\") as fl:\n yaml.dump(self._as_dict(), fl)\n self.path = Path(fname)",
"def dump(self, yaml_file):\n\n with open(yaml_file, 'w') as fp:\n yaml.dump(self.__dict__, fp)",
"def write(self):\n print yaml.dump(self._config, default_flow_style=False),",
"def save_to_yml_file(self):\n yml_filename = self.get_yml_filename()\n\n if os.path.exists(yml_filename) and not self.force:\n logger.warning(\n f\"[red]File {yml_filename} already exists, not writing. To override add --force.[/red]\"\n )\n else:\n if self.force:\n logger.info(\n f\"[yellow]Force flag is used. Overriding {yml_filename} if it exists.[/yellow]\"\n )\n if self.metadata:\n self.metadata.save_dict_as_yaml_integration_file(yml_filename)",
"def save(self):\n defn_dir = path.dirname(self.definition_filename)\n\n if not path.isdir(defn_dir):\n os.makedirs(defn_dir)\n\n # Force check of stopsignal\n self.stopsignal\n\n with open(self.definition_filename, 'w') as df:\n yaml.safe_dump(self.raw_data, df, default_flow_style=False)",
"def saveData(data, file, path='./data/'):\n\twith open(\"{}{}.yml\".format(path, file), 'w') as out:\n\t\tyaml.dump(data, out)",
"def write_yaml(fname: str, data: dict) -> None:\n try:\n with open(fname, 'w') as f:\n yaml.safe_dump(data, f, default_flow_style=False)\n except IOError as e:\n print(f\"Cannot write YAML file {fname}\")\n print(f\"IOError: {e}\")",
"def to_content(cls, data: Mapping) -> str:\n cls._check_yaml()\n s = yaml.safe_dump(data, default_flow_style=False)\n s = '---\\n' + s\n return s",
"def save(self, filepath):\n writer = json.dump if Config.isjson(filepath) else yaml.dump\n with open(filepath, 'w') as f:\n writer(dict(self), f)",
"def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)",
"def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()",
"def dump(filename: Path) -> None:\n import yaml\n\n dumped_str = yaml.dump_all(\n [data_dict],\n Dumper=RegressionYamlDumper,\n default_flow_style=False,\n allow_unicode=True,\n indent=2,\n encoding=\"utf-8\",\n )\n with filename.open(\"wb\") as f:\n f.write(dumped_str)",
"def save_dict_as_yaml_integration_file(self, output_file: str):\n logger.debug(f\"Writing collected metadata to {output_file}.\")\n\n write_yml(output_file, self.metadata_dict)\n logger.info(\"[green]Finished successfully.[/green]\")",
"def save_yaml(data, write_path: PathLike) -> None:\n with open(write_path, \"w\") as write_file:\n yaml.dump(data, write_file, default_flow_style=False)",
"def save(self):\n with open(self._config, 'w') as f:\n json.dump(self.data, f, indent=2, sort_keys=True)",
"def save(self, data : dict) -> None:\n try:\n self._logger.debug('Save new metadata file %s.', self._path)\n self._add_version()\n self._add_key(data, ChatDumpMetaFile.key_chatName)\n self._add_key(data, ChatDumpMetaFile.key_LastMessageId)\n self._add_key(data, ChatDumpMetaFile.key_exporter)\n self._add_key(data, ChatDumpMetaFile.key_exporterConfig)\n self._add_key(data, ChatDumpMetaFile.key_filter)\n with open(self._path, 'w') as mf:\n json.dump(self._data, mf, indent=4, sort_keys=False)\n except OSError as ex:\n msg = 'Failed to write the metadata file. {}'.format(ex.strerror);\n raise MetaFileError(msg)",
"def save_to_yaml(self, path=None):\n\n if not path:\n path = \".\".join([self.name.value, \"yaml\"])\n\n planet_dict = {}\n for a in sorted(self.attributes):\n exo_param = getattr(self, a)\n param_dict = exo_param.__dict__\n param_dict = {k: str(v)\n for k, v in param_dict.items()\n if v and len(str(v)) > 0}\n planet_dict[a] = param_dict\n\n with open(path, 'w') as yamlfile:\n yaml.dump(planet_dict, yamlfile, default_flow_style=False)",
"def write(self, filename, mode=\"w\"):\n d = self._result_dict\n val = yaml.safe_dump(d, default_flow_style=False)\n\n with open(str(filename), mode) as outfile:\n outfile.write(val)",
"async def dump(self, data: dict, file: IO):",
"def yaml_inventory(self):\n inventory_file = 'inventory_file'\n with open(inventory_file, 'w') as invfile:\n yaml.dump(self.inventory_dict, invfile, default_flow_style=False, sort_keys=False)",
"def write(self, filename: str):\n obj = self.to_dict(self)\n config.write(obj, filename)",
"def _write(self, preset_type, data):\n logger.debug('write presets for %s', self._device.name)\n with self._file_open_rlock(preset_type) as f:\n f.seek(0)\n yaml.dump(data, f, default_flow_style=False)\n f.truncate()"
] | [
"0.7988538",
"0.75797796",
"0.7365184",
"0.7365184",
"0.7156229",
"0.6999356",
"0.6944983",
"0.6942563",
"0.69258875",
"0.68983555",
"0.6881225",
"0.6877785",
"0.68321437",
"0.68181944",
"0.67738885",
"0.6756965",
"0.6753976",
"0.67452943",
"0.6742652",
"0.67258483",
"0.67258155",
"0.67019874",
"0.6676384",
"0.66743356",
"0.66444284",
"0.66163",
"0.66144156",
"0.65820813",
"0.6580707",
"0.6579991"
] | 0.8466246 | 0 |
add a box to the shape | def add_box(self, l, w, h, x, y, z, comment=""):
self.data['shape']['compound'].append({'box': {'#': comment, 'pose': {'x': x, 'y': y, 'z': z},
'size': {'x': l, 'y': w, 'z': h}}}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_box(self):\n self.scenes[self.current_scene].add_object(Box())\n self.redraw()",
"def add_box(self, box):\n mz_from = box.from_mz\n mz_to = box.to_mz\n rt_from = box.from_rt\n rt_to = box.to_rt\n self.boxes_mz.addi(mz_from, mz_to, box)\n self.boxes_rt.addi(rt_from, rt_to, box)",
"def add_box(self, width, height, label=None, facecolor='none', color='b'):\n rect = patches.Rectangle(\n (0, 0),\n width,\n height,\n linewidth=1,\n edgecolor=color,\n label=label,\n facecolor=facecolor)\n pyplot.gca().add_patch(rect)",
"def add_random_box(self, env):\n box_size = self.random_size(0.05, 0.15, 0.05, 0.15, 0.01, 0.06)\n box_pose = self.random_pose(env, box_size)\n box_template = 'assets/box/box-template.urdf'\n box_urdf = self.fill_template(box_template, {'DIM': box_size})\n box_id = env.add_object(box_urdf, box_pose)\n os.remove(box_urdf)\n self.color_random_brown(box_id)\n self.object_points[box_id] = np.float32((0, 0, 0)).reshape(3, 1)\n self._IDs[box_id] = 'random_box'\n return box_id",
"def add_box(self, timeout=4):\n\n # Side length of the box\n box_size = 0.16\n\n # Set pose of the box\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = 'world'\n box_pose.pose.orientation.w = 1.0\n box_pose.pose.position.x = 0.0\n box_pose.pose.position.y = 0.45\n box_pose.pose.position.z = 1.92\n\n # Add box to scene\n self.scene.add_box(self.box_name, box_pose, size=(box_size,\n box_size,\n box_size))\n\n # Wait for update and return status\n return self.wait_for_state_update(box_is_known=True,\n timeout=timeout)",
"def box(self, box, padx=0.5, pady=0.3, **options):\n\n # underride sets default values only if the called hasn't\n underride(options, outline='black')\n box.left -= padx\n box.top -= pady\n box.right += padx\n box.bottom += pady\n item = self.rectangle(box, **options)\n return item",
"def box(self, x0, y0, width, height):\n assert width > 1\n assert height > 1\n\n width -= 1\n height -= 1\n\n for x in range(x0, x0 + width):\n self.point(x, y0, \"-\")\n self.point(x, y0 + height, \"-\")\n\n for y in range(y0, y0 + height):\n self.point(x0, y, \"|\")\n self.point(x0 + width, y, \"|\")\n\n self.point(x0, y0, \"+\")\n self.point(x0 + width, y0, \"+\")\n self.point(x0, y0 + height, \"+\")\n self.point(x0 + width, y0 + height, \"+\")",
"def box(self, x, y, w, h):\n\t\tpass",
"def draw_box(image, box, color, thickness=2):\n b = np.array(box).astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)",
"def shape(self) -> str:\n return \"box\"",
"def add_textbox(self, left, top, width, height):\n id = self.__next_shape_id\n name = 'TextBox %d' % (id-1)\n sp = self.__sp(id, name, left, top, width, height, is_textbox=True)\n self.__spTree.append(sp)\n shape = Shape(sp)\n self.__shapes.append(shape)\n return shape",
"def draw_box(img, box):\n draw_img = img.copy()\n cv2.polylines(draw_img, np.int32([box]), True, (255, 0, 0), 4)\n show(draw_img)",
"def add_box(self, position_x=None, position_y=None, width=None,\n height=None, static=False, kinematic=False,\n density=None, 密度=None,\n 位置x=None, 位置y=None, 寬=None, 高=None, \n 固定=False, random_flag=False):\n\n if static or 固定 :\n box_body = pymunk.Body(body_type=pymunk.Body.STATIC)\n elif kinematic:\n box_body = pymunk.Body(body_type=pymunk.Body.KINEMATIC)\n else:\n box_body = pymunk.Body(body_type=pymunk.Body.DYNAMIC) \n\n \n tmp_width = 寬 if 寬 is not None else width\n if not random_flag:\n tmp_width = tmp_width if tmp_width is not None else self.config.SIZE_WIDTH\n else:\n tmp_width = tmp_width if tmp_width is not None else randint(*self.config.RAMDOM_SIZE_RANGE)\n\n if tmp_width <= 0: raise BoxException('新增方塊錯誤','寬(width)要大於0')\n \n tmp_height = 高 if 高 is not None else height\n if not random_flag:\n tmp_height = tmp_height if tmp_height is not None else self.config.SIZE_HEIGHT\n else:\n tmp_height = tmp_height if tmp_height is not None else randint(*self.config.RAMDOM_SIZE_RANGE)\n\n if tmp_height <= 0: raise BoxException('新增方塊錯誤','高(height)要大於0')\n\n box_shape = pymunk.Poly.create_box(box_body, (tmp_width, tmp_height) )\n\n tmp_density = 密度 if 密度 is not None else density\n if tmp_density is None:\n tmp_density = self.config.DENSITY\n box_shape.density = tmp_density\n \n box_shape.friction = self.config.FRICTION\n box_shape.elasticity = self.config.ELASTICITY\n \n box_shape.color = color.random() \n \n\n\n tmp_x = 位置x if 位置x is not None else position_x\n if not random_flag:\n tmp_x = tmp_x if tmp_x is not None else self.config.X\n else:\n tmp_x = tmp_x if tmp_x is not None else randint(*self.config.RANDOM_X_RANGE)\n\n tmp_y = 位置y if 位置y is not None else position_y\n if not random_flag:\n tmp_y = tmp_y if tmp_y is not None else self.config.Y\n else:\n tmp_y = tmp_y if tmp_y is not None else randint(*self.config.RANDOM_Y_RANGE)\n\n box_body.position = (tmp_x, tmp_y)\n\n if not random_flag:\n box_body.angle = 0\n else:\n box_body.angle = 3.1416 * 2 * random()\n\n if not random_flag:\n box_body.velocity = (0, 0)\n else:\n box_body.velocity = ( randint(*self.config.RANDOM_VELOCITY_RANGE),\n randint(*self.config.RANDOM_VELOCITY_RANGE) ) \n\n self.space.add(box_body, box_shape)\n return BodyShapeWrapper(box_body, box_shape)",
"def make_box(self, name=None) -> 'Box':\n\n if self.size().x == 0:\n box = Rect(self.size().z, self.size().y, name=name)\n box.ry(90)\n elif self.size().y == 0:\n box = Rect(self.size().x, self.size().z, name=name)\n box.rx(90)\n elif self.size().z == 0:\n box = Rect(self.size().x, self.size().y, name=name)\n else:\n box = Box(*self.size().asArray(), name=name)\n\n box.place(\n ~box == ~self,\n ~box == ~self,\n ~box == ~self)\n return box",
"def fill_box(self, x, y, w, h):\n\t\tpass",
"def _add_box(self, boxdesc):\n # Check box definition parameters\n box_attributes = list(boxdesc.keys())\n if not set(box_attributes).issubset(self.unit_attributes):\n raise ValueError(\n \"Box definition: '{0}' defined in '{1}' is not supported. \"\n \"Supported box parameters are '{2}'.\".format(\n json.dumps(boxdesc, indent=2), self._xmlfile,\n self.unit_attributes))\n for mandatory_parameter in self.unit_attributes[:2]:\n if mandatory_parameter not in box_attributes:\n raise ValueError(\n \"A '{0}' parameter is required in box definition: '{1}' \"\n \"defined in '{2}'.\".format(\n mandatory_parameter, json.dumps(boxdesc, indent=2),\n self._xmlfile))\n\n # Check the name of the new box is not already reserved\n box_name = boxdesc[self.unit_attributes[0]][0]\n if box_name in self._boxes:\n raise ValueError(\"The box name '{0}' defined in '{1}' is already \"\n \"used.\".format(box_name, self._xmlfile))\n\n # Instanciate the new box\n box_module = boxdesc[self.unit_attributes[1]][0]\n iterinputs = boxdesc.get(self.unit_attributes[3], [])\n iteroutputs = boxdesc.get(self.unit_attributes[4], [])\n if box_module.endswith(\".xml\"):\n box = Pbox(box_module)\n else:\n box = Bbox(box_module)\n box.update_control_names(box_name)\n if iterinputs != [] or iteroutputs != []:\n iterinputs = [item[\"name\"] for item in iterinputs]\n iteroutputs = [item[\"name\"] for item in iteroutputs]\n box = Ibox(box, iterinputs, iteroutputs)\n self._boxes[box_name] = box\n\n # Set the new box default parameters\n set_tag = self.unit_attributes[2]\n if set_tag in box_attributes:\n for box_defaults in boxdesc[set_tag]:\n\n # Check the proper lexic has been specified\n if not set(box_defaults.keys()).issubset(self.unit_set):\n raise ValueError(\n \"Box attribute definition: '{0}' defined in '{1}' is \"\n \"not supported. Supported attributes are \"\n \"'{2}'.\".format(\n list(box_defaults.keys()), self._xmlfile,\n self.unit_set))\n\n # Set the input or output default paramters\n box_pname = box_defaults[self.unit_set[0]]\n box_pvalue = eval(box_defaults[self.unit_set[1]])\n if box_pname in self._boxes[box_name].inputs.controls:\n control = getattr(self._boxes[box_name].inputs, box_pname)\n elif box_pname in self._boxes[box_name].outputs.controls:\n control = getattr(self._boxes[box_name].outputs, box_pname)\n else:\n raise ValueError(\n \"The parameter '{0}' is not defined in the box \"\n \"'{1}' input or output parameters.\".format(\n box_pname, box_name))\n control.optional = True\n control.value = box_pvalue",
"def draw_box(self) -> None:\n from math import pi, sin, cos\n import pymol\n from pymol import cmd\n\n # Convert angle\n angle1 = (self.angle1.value() / 180.0) * pi\n angle2 = (self.angle2.value() / 180.0) * pi\n\n # Get positions of box vertices\n # P1\n x1 = -self.min_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y1 = -self.min_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z1 = self.min_x.value() * sin(angle2) + self.min_y.value() * sin(angle1) * cos(angle2) - self.min_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P2\n x2 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y2 = (-self.min_y.value()) * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z2 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P3\n x3 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y3 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z3 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P4\n x4 = (-self.min_x.value()) * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y4 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z4 = -(-self.min_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P5\n x5 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y5 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z5 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P6\n x6 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y6 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z6 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P7\n x7 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n\n y7 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n\n z7 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P8\n x8 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y8 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z8 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # Create box object\n pymol.stored.list = []\n if \"box\" in 
cmd.get_names(\"selections\"):\n cmd.iterate(\"box\", \"stored.list.append((name, color))\", quiet=1)\n list_color = pymol.stored.list\n cmd.delete(\"box\")\n if len(list_color) > 0:\n for item in list_color:\n at_name = item[0]\n at_c = item[1]\n cmd.set_color(at_name + \"color\", cmd.get_color_tuple(at_c))\n else:\n for at_name in [\"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\", \"v8\", \"v1x\", \"v1y\", \"v1z\", \"v2x\", \"v3y\", \"v4z\"]:\n cmd.set_color(at_name + \"color\", [0.86, 0.86, 0.86])\n\n # Create vertices\n cmd.pseudoatom(\"box\", name=\"v2\", pos=[x2, y2, z2], color=\"v2color\")\n cmd.pseudoatom(\"box\", name=\"v3\", pos=[x3, y3, z3], color=\"v3color\")\n cmd.pseudoatom(\"box\", name=\"v4\", pos=[x4, y4, z4], color=\"v4color\")\n cmd.pseudoatom(\"box\", name=\"v5\", pos=[x5, y5, z5], color=\"v5color\")\n cmd.pseudoatom(\"box\", name=\"v6\", pos=[x6, y6, z6], color=\"v6color\")\n cmd.pseudoatom(\"box\", name=\"v7\", pos=[x7, y7, z7], color=\"v7color\")\n cmd.pseudoatom(\"box\", name=\"v8\", pos=[x8, y8, z8], color=\"v8color\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1x\", pos=[x1, y1, z1], color='red')\n cmd.pseudoatom(\"box\", name=\"v2x\", pos=[x2, y2, z2], color='red')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1y\", pos=[x1, y1, z1], color='forest')\n cmd.pseudoatom(\"box\", name=\"v3y\", pos=[x3, y3, z3], color='forest')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v4z\", pos=[x4, y4, z4], color='blue')\n cmd.pseudoatom(\"box\", name=\"v1z\", pos=[x1, y1, z1], color='blue')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")",
"def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box",
"def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box",
"def put_box(self, x, y, color=(0, 0, 0), width=1):\n assert(width > 0)\n if width == 1:\n self.put_pixel(x, y, color=color)\n return\n for i in range(x-int(width/2), x+int(width/2)):\n for j in range(y-int(width/2), y+int(width/2)):\n self.put_pixel(i, j, color=color)",
"def _draw_single_box_on_image(self,box,label,id):\n p1 = (box[1], box[0])\n p2 = (box[3], box[2])\n if self.config.DISCO_MODE:\n color = random.choice(self.STANDARD_COLORS)\n else:\n color = self.STANDARD_COLORS[id]\n cv2.rectangle(self.image, p1, p2, color, 2)\n self._draw_text_on_image(label,(p1[0],p1[1]-10),color)",
"def Add(self, *args):\n return _Bnd.Bnd_Box_Add(self, *args)",
"def draw_box(image, curr_box, label, draw_line=False):\n # y1, x1, y2, x2 = box\n # print(curr_box)\n # assert False\n x1, y1, x2, y2 = curr_box[0], curr_box[1], curr_box[2], curr_box[3]\n _, h, w = image.size()\n x1 = int(x1.item() * w)\n y1 = int(y1.item() * h)\n x2 = int(x2.item() * w)\n y2 = int(y2.item() * h)\n if draw_line:\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n image[:, y1:y1 + 3, x1:x2] = label/13.0\n image[:, y2:y2 + 3, x1:x2] = label/13.0\n image[:, y1:y2, x1:x1 + 3] = label/13.0\n image[:, y1:y2, x2:x2 + 3] = label/13.0\n else:\n image[:, y1:y1 + 3, x1:x2] = label/13.0\n image[:, y2:y2 + 3, x1:x2] = label/13.0\n image[:, y1:y2, x1:x1 + 3] = label/13.0\n image[:, y1:y2, x2:x2 + 3] = label/13.0\n return image",
"def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n self.button_redraw_box.setEnabled(True)\n self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)",
"def draw_box(\n draw,\n box,\n img_width,\n img_height,\n text=\"\",\n color=(255, 255, 0),\n) -> None:\n\n line_width = 3\n font_height = 8\n y_min, x_min, y_max, x_max = box\n (left, right, top, bottom) = (\n x_min * img_width,\n x_max * img_width,\n y_min * img_height,\n y_max * img_height,\n )\n draw.line(\n [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],\n width=line_width,\n fill=color,\n )\n if text:\n draw.text(\n (left + line_width, abs(top - line_width - font_height)), text, fill=color\n )",
"def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color)",
"def computeAndInsertBox(self,**kwargs):\n if self.predefined_box is None:\n self.mm.neglect()\n return\n (pose,new_frame) = self.baxter.frame.computeTransformation() \n if pose is None:\n self.mm.neglect()\n return\n \n try:\n side = kwargs['side']\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n else:\n self.baxter.frame.setTF(self.predefined_box+'_'+side,pose)\n self.baxter.frame.waitUntilFrameUpdate(self.predefined_box+\"_\"+side)\n self.baxter.scene.createPredefinedBox(self.predefined_box+\"_\"+side,self.predefined_box)\n if self.learning:\n self.appendToTask(\"import tf_helper \\n\")\n self.appendToTask(\"side='%s'\\n\"%(side))\n self.appendToTask(\"baxter.bb.predefined_box='%s'\\n\"%(self.predefined_box))\n self.appendToTask(\"pose = tf_helper.PS('%s',%s,%s)\\n\"%(FRAME_ORIGIN,list(pose.pose.position),list(pose.pose.orientation)))\n self.appendToTask(\"baxter.frame.setTF('%s_'+side,pose)\\n\"%(self.predefined_box))\n self.appendToTask(\"baxter.frame.waitUntilFrameUpdate('%s_'+side)\\n\"%(self.predefined_box))\n self.appendToTask(\"baxter.scene.createPredefinedBox(baxter.bb.predefined_box+'_'+side,baxter.bb.predefined_box)\\n\")\n if self.predefined_box == \"wako\" or self.predefined_box.startswith(\"tray\") is True or self.predefined_box.startswith(\"table\") is True:\n self.appendToTask(\"for drop_off in baxter.scene.boxes[baxter.bb.predefined_box][1].keys():\\n\"%())\n self.appendToTask(\" pose = tf_helper.PS('%s_'+side,%s,%s)\\n\"%(self.predefined_box,\"baxter.scene.boxes[baxter.bb.predefined_box][1][drop_off][0:3]\",\"baxter.scene.boxes[baxter.bb.predefined_box][1][drop_off][3:7]\"))\n self.appendToTask(\" baxter.frame.setTF(drop_off+'_'+side,pose)\\n\")\n if self.predefined_box == \"wako\" or self.predefined_box.startswith(\"tray\") is True or self.predefined_box.startswith(\"table\") is True:\n for drop_off in self.baxter.scene.boxes[self.predefined_box][1].keys():\n pose = PS(self.predefined_box+'_'+side,self.baxter.scene.boxes[self.predefined_box][1][drop_off][0:3],self.baxter.scene.boxes[self.predefined_box][1][drop_off][3:7])\n self.baxter.frame.setTF(drop_off+'_'+side,pose)\n self.mm.confirm()",
"def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color, 3)",
"def _box_face(image, face):\n draw = PIL.ImageDraw.Draw(image.image)\n draw.rectangle(face.as_box(), outline=\"yellow\")",
"def plotBox(box):\n plt.plot([box.xll, box.xur, box.xur, box.xll, box.xll]\n ,[box.yll, box.yll, box.yur, box.yur, box.yll]\n , '-'\n )"
] | [
"0.7877751",
"0.7707694",
"0.76889735",
"0.70273536",
"0.6995231",
"0.6946092",
"0.69300044",
"0.6853376",
"0.6805851",
"0.67583156",
"0.6678382",
"0.6639419",
"0.65904963",
"0.6526012",
"0.64928424",
"0.64728737",
"0.6445446",
"0.64276713",
"0.64276713",
"0.64217186",
"0.63875914",
"0.63557756",
"0.63392067",
"0.63032544",
"0.62506944",
"0.62400013",
"0.6229159",
"0.6216492",
"0.6213512",
"0.6198431"
] | 0.8421367 | 0 |
add an 'in_front_of' area to an object. It takes into account the size of the object | def add_in_front_of(self, depth, width, side_clearance=DEFAULT_IN_FRONT_SIDE_CLEARANCE,
distance=DEFAULT_IN_FRONT_DISTANCE,
size=DEFAULT_IN_FRONT_SIZE):
boxmin = {'x': round(depth/2 + distance, ROUND_LEVEL),
'y': round(-width/2 + side_clearance, ROUND_LEVEL),
'z': 0.0}
boxmax = {'x': round(depth/2 + distance+size, ROUND_LEVEL),
'y': round(width/2 - side_clearance, ROUND_LEVEL),
'z': 0.01}
self.add_area("in_front_of", "shape", [{'box': {'min': boxmin, 'max': boxmax}}]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_on_top_of(self, name=\"\", bottom_clearance=DEFAULT_BOTTOM_CLEARANCE,\n side_clearance=DEFAULT_SIDE_CLEARANCE, front_clearence=DEFAULT_FRONT_CLEARANCE,\n back_clearence=DEFAULT_BACK_CLEARANCE, height=ON_TOP_OFF_HEIGHT):\n\n if not name:\n name = 'on_top_of'\n if len(self.data['shape']['compound']) == 0:\n print(\"No shapes yet, cannot add ontopoff\")\n return\n else:\n shape = self.data['shape']['compound'][-1]\n if 'box' not in shape:\n print(\"No box in this shape, cannot add ontopoff\")\n return\n\n pose = shape['box']['pose']\n size = shape['box']['size']\n\n boxmin = {'x': round(pose['x'] - size['x']/2.0 + back_clearence, ROUND_LEVEL),\n 'y': round(pose['y'] - size['y']/2.0 + side_clearance, ROUND_LEVEL),\n 'z': round(pose['z'] + size['z']/2.0 + bottom_clearance, ROUND_LEVEL)}\n boxmax = {'x': round(pose['x'] + size['x']/2.0 - front_clearence, ROUND_LEVEL),\n 'y': round(pose['y'] + size['y']/2.0 - side_clearance, ROUND_LEVEL),\n 'z': round(pose['z'] + size['z']/2.0 + bottom_clearance + height, ROUND_LEVEL)}\n\n self.add_area(name, \"shape\", [{'box': {'min': boxmin, 'max': boxmax}}])",
"def add_front(self, item):\n\n self.items.insert(0, item)",
"def set_object_in_front_of_agent(sim, obj_id, z_offset=-1.5):\n agent_transform = sim.agents[0].scene_node.transformation_matrix()\n obj_translation = agent_transform.transform_point(\n np.array([0, 0, z_offset])\n )\n sim.set_translation(obj_translation, obj_id)\n\n obj_node = sim.get_object_scene_node(obj_id)\n xform_bb = habitat_sim.geo.get_transformed_bb(\n obj_node.cumulative_bb, obj_node.transformation\n )\n\n # also account for collision margin of the scene\n scene_collision_margin = 0.04\n y_translation = mn.Vector3(\n 0, xform_bb.size_y() / 2.0 + scene_collision_margin, 0\n )\n sim.set_translation(y_translation + sim.get_translation(obj_id), obj_id)",
"def add_object(self, obj): # DEFINE OBJ!\n obj.spritesheet_width = self.spritesheet.size['width']\n obj.spritesheet_height = self.spritesheet.size['height']\n \n obj._layer_added(self)\n \n\n obj.buffer_index = len(self.objects)\n self.objects.append(obj)\n\n x = obj.x\n y = obj.y\n \n self.verts.extend(((x, y, 0.0), (x+obj.width, y, 0.0), (x+obj.width, y-obj.height, 0.0), (x, y-obj.height, 0.0)))\n self.texcoords.extend(obj.uv_texture)\n self.norms.extend(((0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1)))\n\n if pi3d.PLATFORM == pi3d.PLATFORM_PI:\n self.inds.append((self.a,self.b,self.c))\n self.inds.append((self.d,self.a,self.c))\n else:\n self.inds.extend((self.a,self.b,self.c))\n self.inds.extend((self.d,self.a,self.c))\n\n self.a += 4\n self.b += 4\n self.c += 4\n self.d += 4\n\n \n #~ return len(self.sprites)-1",
"def append_front(self, item):\n\n self.front = Node(item, self.front)",
"def addToFront(self, value):\n self._growCheck()\n super().addToFront(value)",
"def addFront(self, item, clock):\n temp = Node2Way(item, clock)\n temp.setPrevious(self._front)\n \n if self._size == 0:\n self._rear = temp\n else:\n self._front.setNext(temp)\n \n self._front = temp\n self._size += 1",
"def wall_in_front(self): #py:UR.wall_in_front\n return RUR._UR.wall_in_front_(self.body)",
"def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))",
"def _is_obstacle_in_front(self):\n range_front = []\n range_front[:20] = self.lidar_data[-20:]\n range_front[20:] = self.lidar_data[:20]\n range_front = list(filter(lambda num: num != 0, range_front))\n min_front = min(range_front)\n if min_front < 0.4 and min_front != 0.0:\n\t\t\treturn True\n else:\n\t\t\treturn False",
"def binarization_element_above(self, element, new_object=lambda lattice, x, y: len(lattice)):\n\n while len(self.above(element)) > 2:\n successors = list(self.above(element))\n x = successors[0]\n y = successors[1]\n size = len(self.above_filter(self.sup(x, y)))\n\n for i in range(len(successors)):\n first = successors[i]\n for j in range(i + 1, len(successors)):\n second = successors[j]\n if len(self.above_filter(self.sup(first, second))) > size:\n x = first\n y = second\n size = len(self.above_filter(self.sup(first, second)))\n new = new_object(self, x, y)\n\n self._hase_diagram.add(new)\n self._hase_diagram.difference([(element, x), (element, y)])\n self._hase_diagram.update([(new, x), (new, y), (element, new)])\n\n self._order.add(new)\n self._order.update([(new, x), (new, y), (element, new)])\n self._order.update((z, new) for z in self._order(element, begin=False, end=True))\n self._order.update((new, z) for z in self._order(x))\n self._order.update((new, z) for z in self._order(y))\n\n return self",
"def enqueue_front(self, item):\n self._items.insert(0, item)",
"def add_object(self, obj):\n if self.it_fits(obj):\n self.content.append(obj)\n return self\n else:\n raise Exception(f\"Object {obj.name} does not fit on the box\")",
"def place_obj(self,\n obj,\n top=None,\n size=None,\n reject_fn=None,\n max_tries=math.inf\n ):\n\n if top is None:\n top = (0, 0)\n\n if size is None:\n size = (self.grid.width, self.grid.height)\n\n num_tries = 0\n\n while True:\n # This is to handle with rare cases where rejection sampling\n # gets stuck in an infinite loop\n if num_tries > max_tries:\n raise RecursionError('rejection sampling failed in place_obj')\n\n num_tries += 1\n\n pos = np.array((\n self._rand_int(top[0], top[0] + size[0]),\n self._rand_int(top[1], top[1] + size[1])\n ))\n\n # Don't place the object on top of another object\n if self.grid.get(*pos) != None:\n continue\n\n # Don't place the object where the agent is\n if np.array_equal(pos, self.start_pos):\n continue\n\n if np.array_equal(pos, self.start_dpos):\n continue\n\n # Check if there is a filtering criterion\n if reject_fn and reject_fn(self, pos):\n continue\n\n break\n\n self.grid.set(*pos, obj)\n\n if obj is not None:\n obj.init_pos = pos\n obj.cur_pos = pos\n\n return pos",
"def wall_in_front(): #py:wall_in_front\n return RUR._wall_in_front_()",
"def push_front(self, param):\n if self.size == self.capacity:\n self.resize(2 * self.size)\n for _ in range(self.arr):\n pass",
"def push(self, item):\n super().add_item_to_front(item)",
"def _find_front(self):\n self.front = (laplace(self.working_mask) > 0).astype('uint8')\n # TODO: check if scipy's laplace filter is faster than scikit's",
"def add(self, obj):\n self.objects.append(obj)\n if obj.gravity == 0:\n obj.gravity = self.gravity\n if obj.gravity_z == 0:\n obj.gravity_z = self.gravity_z",
"def push_front(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_front()\n self.data_[self.front_]= e#New Front\n self.size_+=1\n # print(\"Case 1\")\n elif(self.front_ == -1 and self.size_ ==0) :#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_ = 0\n self.data_[self.front_]= e #Inserting First element in deque either front end or rear end they both lead to the same result.\n self.size_+=1\n # print(\"Case 2\")\n elif (self.front_ ==0):#If the front is at the beginning of the Deque.This may happen after the first insertion.\n self.front_-=1\n self.data_[self.front_] = e\n self.size_+=1\n # print(\"Case 3\")\n else:\n self.front_ -=1 #We add normally \n self.data_[self.front_] = e\n self.size_+=1\n #print(\"Case 4\")",
"def insertFront(self, item):\n self.sentinel.insertAfter(item)\n self.N += 1",
"def add_before ( self ):\n self.add_item( 0 )",
"def add_card(self, card_, on_top=True):\n card_.unclick()\n if on_top:\n pos_ = self.pos\n if len(self.cards) is not 0:\n length = len(self.cards)\n pos_ = (self.pos[0] + length * self.offset[0],\n self.pos[1] + length * self.offset[1])\n card_.set_pos(pos_)\n self.cards.append(card_)\n else:\n self.cards.insert(0, card_)\n self.update_position()",
"def __add_elem(self, elem):\n # compute the box around the element adding vmax safety points\n vmax = self.stencil.vmax\n elem_bl, elem_ur = elem.get_bounds()\n phys_bl, phys_ur = self.get_bounds_halo()\n\n tmp = np.array((elem_bl - phys_bl)/self.dx, np.int) - vmax\n nmin = np.maximum(vmax, tmp)\n tmp = np.array((elem_ur - phys_bl)/self.dx, np.int) + vmax + 1\n nmax = np.minimum(vmax + self.shape_in, tmp)\n\n # set the grid\n space_slice = [slice(imin, imax) for imin, imax in zip(nmin, nmax)]\n total_slice = [slice(None)] + space_slice\n # local view of the arrays\n ioo_view = self.in_or_out[space_slice]\n dist_view = self.distance[total_slice]\n flag_view = self.flag[total_slice]\n\n tcoords = (self.coords_halo[d][s] for d, s in enumerate(space_slice))\n grid = np.meshgrid(*tcoords, sparse=True, indexing='ij')\n\n if not elem.isfluid: # add a solid part\n ind_solid = elem.point_inside(grid)\n ind_fluid = np.logical_not(ind_solid)\n ioo_view[ind_solid] = self.valout\n else: # add a fluid part\n ind_fluid = elem.point_inside(grid)\n ind_solid = np.logical_not(ind_fluid)\n ioo_view[ind_fluid] = self.valin\n\n for k in range(self.stencil.unvtot):\n vk = np.asarray(self.stencil.unique_velocities[k].v)\n if np.any(vk != 0):\n space_slice = [slice(imin + vk[d], imax + vk[d]) for imin, imax, d in zip(nmin, nmax, range(self.dim))]\n # check the cells that are out when we move with the vk velocity\n out_cells = self.in_or_out[space_slice] == self.valout\n # compute the distance and set the boundary label\n # of each cell and the element with the vk velocity\n alpha, border = elem.distance(grid, self.dx*vk, 1.)\n # take the indices where the distance is lower than 1\n # between a fluid cell and the border of the element\n # with the vk velocity\n indx = np.logical_and(alpha > 0, ind_fluid)\n if out_cells.size != 0:\n indx = np.logical_and(indx, out_cells)\n\n if elem.isfluid:\n # take all points in the fluid in the ioo_view\n indfluidinbox = ioo_view == self.valin\n # take all the fluid points in the box (not only in the created element)\n # which always are in fluid after a displacement of the velocity vk\n borderToInt = np.logical_and(np.logical_not(out_cells), indfluidinbox)\n dist_view[k][borderToInt] = self.valin\n flag_view[k][borderToInt] = self.valin\n else:\n dist_view[k][ind_solid] = self.valin\n flag_view[k][ind_solid] = self.valin\n\n #set distance\n ind4 = np.where(indx)\n if not elem.isfluid:\n ind3 = np.where(alpha[ind4] < dist_view[k][ind4])[0]\n else:\n ind3 = np.where(np.logical_or(alpha[ind4] > dist_view[k][ind4], dist_view[k][ind4] == self.valin))[0]\n\n ind = [i[ind3] for i in ind4]\n dist_view[k][ind] = alpha[ind]\n flag_view[k][ind] = border[ind]",
"def boundary(active, objects):\n limit = SIZE[1]\n for obj in objects:\n if active.pos_x == obj.pos_x:\n limit = min(limit, obj.pos_y)\n active.pos_y = limit-active.height\n active.col_d = True",
"def insert_front(self, value: int) -> bool:\r\n if self.size != self.capacity:\r\n self.deque[self.frontIndex] = value\r\n self.size += 1\r\n if self.frontIndex == 0:\r\n self.frontIndex = self.capacity - 1\r\n else:\r\n self.frontIndex -= 1\r\n return True\r\n return False",
"def __init__(self):\n self.front = None\n self.minfront = None",
"def add_object(self, obj_data, obj_name, obj_orientation, qpmi, entity):\n self.objects.append((obj_data, obj_name, obj_orientation, qpmi, entity))\n if len(self.objects) == 1:\n self.set_default_brush()",
"def boundary(self):\n if self.pos.x < 0:\n self.pos.x = 0\n if self.pos.x > WIDTH - 48:\n self.pos.x = WIDTH - 48\n if self.pos.y < 0:\n self.pos.y = 0\n if self.pos.y > HEIGHT - 48:\n self.pos.y = HEIGHT - 48\n\n self.rect.topleft = self.pos",
"def _place_new_obj(self, (screen_width, screen_height)):\n old_tree = self.objects.get()\n new_x = (-old_tree.position[0]) + old_tree.max_width*2 + screen_width\n another_tree = Grass((new_x, screen_height), self.width, self.height)\n self.objects.put(another_tree)"
] | [
"0.59779716",
"0.56933",
"0.5566353",
"0.55477345",
"0.5506981",
"0.5493056",
"0.5481022",
"0.54144007",
"0.5384035",
"0.5383211",
"0.5361351",
"0.5339586",
"0.531687",
"0.5316715",
"0.53058517",
"0.5301703",
"0.52933913",
"0.52611434",
"0.5226817",
"0.51790226",
"0.5167534",
"0.51129085",
"0.50981784",
"0.5092218",
"0.50649095",
"0.50623953",
"0.5060084",
"0.50344294",
"0.5034048",
"0.500998"
] | 0.7426306 | 0 |
Adds an 'on_top_of' area to the model. This must be called right after the element w.r.t. which the 'on_top_of' area is specified (e.g., the table top) has been added. | def add_on_top_of(self, name="", bottom_clearance=DEFAULT_BOTTOM_CLEARANCE,
side_clearance=DEFAULT_SIDE_CLEARANCE, front_clearence=DEFAULT_FRONT_CLEARANCE,
back_clearence=DEFAULT_BACK_CLEARANCE, height=ON_TOP_OFF_HEIGHT):
if not name:
name = 'on_top_of'
if len(self.data['shape']['compound']) == 0:
print("No shapes yet, cannot add ontopoff")
return
else:
shape = self.data['shape']['compound'][-1]
if 'box' not in shape:
print("No box in this shape, cannot add ontopoff")
return
pose = shape['box']['pose']
size = shape['box']['size']
boxmin = {'x': round(pose['x'] - size['x']/2.0 + back_clearence, ROUND_LEVEL),
'y': round(pose['y'] - size['y']/2.0 + side_clearance, ROUND_LEVEL),
'z': round(pose['z'] + size['z']/2.0 + bottom_clearance, ROUND_LEVEL)}
boxmax = {'x': round(pose['x'] + size['x']/2.0 - front_clearence, ROUND_LEVEL),
'y': round(pose['y'] + size['y']/2.0 - side_clearance, ROUND_LEVEL),
'z': round(pose['z'] + size['z']/2.0 + bottom_clearance + height, ROUND_LEVEL)}
self.add_area(name, "shape", [{'box': {'min': boxmin, 'max': boxmax}}]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bb_top(self, bb_top: float):\n\n self._bb_top = bb_top",
"def add_card(self, card_, on_top=True):\n card_.unclick()\n if on_top:\n pos_ = self.pos\n if len(self.cards) is not 0:\n length = len(self.cards)\n pos_ = (self.pos[0] + length * self.offset[0],\n self.pos[1] + length * self.offset[1])\n card_.set_pos(pos_)\n self.cards.append(card_)\n else:\n self.cards.insert(0, card_)\n self.update_position()",
"def putOn(self,obj):\n if obj not in self.on:\n self.on.append(obj)\n if self not in obj.on:\n obj.putOn(self)\n if obj not in self.road.on:\n self.road.putOn(obj)",
"def GripperTop(self, attop=True):\r\n \r\n return self.SetFlag(self.optionGripperTop, attop)",
"def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self",
"def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self",
"def top_bar(self, top_bar):\n\n self._top_bar = top_bar",
"def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")",
"def top(self, top):\n\n self._top = top",
"def top(self, top):\n\n self._top = top",
"def add_overflow_button(self) -> None:\n\n self.ids.right_actions.add_widget(\n ActionOverFlowButton(\n theme_text_color=\"Custom\"\n if not self.opposite_colors\n else \"Primary\",\n text_color=self.specific_text_color,\n opposite_colors=self.opposite_colors,\n on_release=lambda x: self.overflow_cls.open(),\n )\n )",
"def over_under(self, over_under):\n\n self._over_under = over_under",
"def place_call_offhold(self) -> None:",
"def overview_on_off(self):\n\n if self.overview_win:\n self.overview_button.deselect()\n self.overview_win.destroy()\n self.overview_win=None\n else:\n self.overview_button.select()\n if not self.data.has_key('AA_seqs1'):\n self.warning('No DNA sequence loaded','Load a DNA sequence first')\n self.overview_button.deselect()\n return\n\n # Open Canvas and draw lines\n self.overview_win=Toplevel()\n self.overview_win.geometry('300x100+400+350')\n self.overview_win.title('Open reading frames')\n self.overview_frame=Canvas(self.overview_win,bd=5,bg='white',width=300,height=150)\n self.overview_frame.xview(\"moveto\", 0)\n self.overview_frame.yview(\"moveto\", 0.2)\n self.overview_frame.grid(row=0,column=0)\n #\n # Draw\n #\n self.update_overview_win()\n return",
"def top_attire_color(self, top_attire_color):\n\n self._top_attire_color = top_attire_color",
"def top(self, top):\n self.ptr.top(top)",
"def putOn(self,obj):\n if obj not in self.on:\n self.on.append(obj)\n if self not in obj.on:\n obj.putOn(self)",
"def putOn(self,obj):\n if obj not in self.on:\n self.on.append(obj)\n if self not in obj.on:\n obj.putOn(self)",
"def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))",
"def add_area_element(self, obj, typ_sofi, layer):\n\n qd = AreaElement(obj)\n\n pts = rs.SurfacePoints(obj)\n\n qd.n1 = self.nodes.add(Node(None, pts[0]))\n qd.n2 = self.nodes.add(Node(None, pts[1]))\n qd.n3 = self.nodes.add(Node(None, pts[3]))\n qd.n4 = self.nodes.add(Node(None, pts[2]))\n\n qd.layer = layer\n\n self.area_elements.add(qd)",
"def overlay_up(self, idx):\n if not self.is_top_layer(idx):\n self.overlay_list[idx], self.overlay_list[idx+1] = \\\n self.overlay_list[idx+1], self.overlay_list[idx]",
"def chart_area(self, chart_area):\n\n self.container['chart_area'] = chart_area",
"def set_area(self, area=0.0):\n self.area = area",
"def append(self, item: Area) -> None:\r\n if not isinstance(item, Area):\r\n raise TypeError(\r\n f\"{item} is not an Area. \"\r\n \"Only Area objects can be inside Areas.\",\r\n )\r\n\r\n self.data.append(item)",
"def top(self, top):\n # type: (float) -> None\n\n if top is not None:\n if not isinstance(top, (float, int)):\n raise TypeError(\"Invalid type for `top`, type has to be `float`\")\n\n self._top = top",
"def overlay(self, overlay):\n\n self._overlay = overlay",
"def total_area_above_sea(self):\n del self._total_area_above_sea",
"def set_top_border(self, val):\n self.tborder = val",
"def service_area(self, service_area: object):\n\n self._service_area = service_area",
"def left_top(self, left_top):\n\n self._left_top = left_top"
] | [
"0.49048257",
"0.46711868",
"0.46110424",
"0.4565928",
"0.45465842",
"0.45465842",
"0.45445573",
"0.44879434",
"0.4483955",
"0.4483955",
"0.44211143",
"0.44092962",
"0.43391472",
"0.4334049",
"0.43188763",
"0.43176568",
"0.43120137",
"0.43120137",
"0.42938113",
"0.4287915",
"0.42724323",
"0.42629144",
"0.42476675",
"0.41919345",
"0.41725728",
"0.4165474",
"0.4140348",
"0.4121287",
"0.41073665",
"0.41030005"
] | 0.67451435 | 0 |
Adds a 'near' area to the model with the given offset | def add_near(self, offset=0.7):
self.add_area("near", "offset", offset) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setNear(self, near):\n self.light.node().getLens().setNear(near)",
"def set_near(self, value):\n scene = self.scenes[self.current_scene]\n scene.set_perspective(near=value)\n self.redraw()",
"def offset(self, offset):\n if self.da > 0:\n radius = self.r + offset\n else:\n radius = self.r - offset\n return Arc(self.c, radius, self.a0, self.da)",
"def add_region_offset(self, offset):\n if not self.__offset_added:\n self.__region_ids = dict(\n (var, region + offset)\n for var, region in self.__region_ids.items())\n\n self.__offset_added = True",
"def near(self, o, e=0.00001):\n raise NotImplementedError\n # TODO: this will snap for any axis despite the others...\n self.set(near(self.x, o.x, e),\n near(self.y, o.y, e),\n near(self.z, o.z, e))",
"def in_place_offset(self, offset):\n self.p += offset * self.cross_z.normalized()",
"def apply_offset(self, query, offset):\n if offset is not None:\n offset = int(offset)\n if offset < 0:\n raise ValueError(\"offset can not be a negative integer.\")\n query = query.offset(offset)\n return query",
"def setNearFar(self, near, far):\n self.light.node().getLens().setNearFar(near, far)",
"def add_on_land_position(self) -> Point2:\n return self.position.offset(Point2((-2.5, 0.5)))",
"def set_offset(self, offset):\n if type(offset) not in [float, int]:\n raise TypeError(\n \"ERROR: Offset can be a float (or int value) within \"\n \"the specified range only.\")\n\n if (offset > self.AMPL_MAX) or (offset < -self.AMPL_MAX):\n raise TypeError(\n \"ERROR: Invalid offset value. Specify offset within the range\")\n\n self._offset = float(offset)",
"def offset(self, offset):\n self._offset += offset",
"def set_initial_offset(self, offset):\n self.initial_offset = max(\n min(\n (len(self) + 0.5) * self.item_heights - self.my_surface.get_height(),\n offset\n ),\n 0\n )",
"def set_offset(self, pos):\n self.offset = self.bbox().offset(pos)",
"def addOffset(tri, offset):\n\tpts = tri.points\n\tL = getDistance(*pts[0:2])\n\tl = offset / math.sin(math.pi / 3)\n\tscale = float((L + offset + l) / L)\n\twithOffset = scale * tri\n\treturn withOffset",
"def searchNear(self, minF, offsetX, offsetY):\n # check offset\n if minF.point.x + offsetX < 0 or minF.point.x + offsetX > self.map2d.w - 1 or minF.point.y + offsetY < 0 or minF.point.y + offsetY > self.map2d.h - 1:\n return\n # if is obstruct then ignore\n if self.map2d[minF.point.x + offsetX][minF.point.y + offsetY] != self.passTag:\n return\n # if in close list,then ignore\n currentPoint = Point(minF.point.x + offsetX, minF.point.y + offsetY)\n if self.pointInCloseList(currentPoint):\n return\n # set unit cost\n if offsetX == 0 or offsetY == 0:\n step = 10\n else:\n step = 14\n # if not in openList,added it in openlist\n currentNode = self.pointInOpenList(currentPoint)\n if not currentNode:\n currentNode = AStar.Node(currentPoint, self.endPoint, g=minF.g + step)\n currentNode.father = minF\n self.openList.append(currentNode)\n return\n # if in openList,if minF to current point G is smaller\n if minF.g + step < currentNode.g: # if smaller,recalculate g,and change father\n currentNode.g = minF.g + step\n currentNode.father = minF",
"def generateOffset(self, offset, max_dist):\n settings.space_by_type['offset'] = 1000000\n self._set_sections(offset, max_dist)\n self.offset = True",
"def offset(self, offset):\n raise NotImplementedError(\"This should have been implemented.\")",
"def set_world_offset(self, offset, is_radian=None, wait=True):\r\n return self._arm.set_world_offset(offset, is_radian=is_radian, wait=wait)",
"def offset(self, offset):\n return Line(self.p + offset * self.cross_z.normalized(), self.v)",
"def offset(self, offset):\n return Line3d(self.p + offset * self.cross.normalized(), self.v)",
"def offset(self, offset):\n\n self._offset = offset",
"def offset(self, offset):\n\n self._offset = offset",
"def offset(self, offset):\n\n self._offset = offset",
"def z_near(self, z_near):\n self.ptr.z_near(z_near)",
"def near():\n\targs = request.args\n\n\tif 'limit' in args: limit = int(args.get('limit'))\n\telse: limit = 1\n\n\tif 'lat' in args and 'lng' in args:\n\t\tlat = float(args.get('lat'))\n\t\tlng = float(args.get('lng'))\n\n\telse:\n\t\treturn jsonify(success=False, reason='wrong_arguments')\n\n\tdocs = findWithInCircle([lat,lng],6)\n\n\treturn json_util.dumps({\n\t\t'success': True, 'docs': docs\n\t})",
"def include(self,source):\n return self.near(source, self.max_radius)",
"def offset(self, offset):\n self._offset = offset",
"def set_offset(self, offset):\n self.offset = offset",
"def set_offset(self, offset):\n self.offset = offset",
"def constant_offset(df, offset, harmonize_year=\"2015\"):\n df = df.copy()\n numcols = utils.numcols(df)\n # just add offset to all values\n df[numcols] = df[numcols].add(offset, axis=0)\n return df"
] | [
"0.6061911",
"0.58462834",
"0.5605604",
"0.5502686",
"0.5402498",
"0.53371626",
"0.52448463",
"0.51996297",
"0.51567805",
"0.51047224",
"0.5097213",
"0.50725985",
"0.5062398",
"0.50562",
"0.50494885",
"0.503145",
"0.50292754",
"0.50055283",
"0.50039315",
"0.49986252",
"0.49956036",
"0.49956036",
"0.49956036",
"0.49869817",
"0.49783874",
"0.4975239",
"0.49520284",
"0.49387497",
"0.49387497",
"0.48705056"
] | 0.86550325 | 0 |
file.managed, existing file with replace=True, change permissions | def test_managed_file_mode_file_exists_replace(
file, tmp_path, grail_scene33_file, mode, replace
):
name = tmp_path / "grail_scene33"
# Set the mode on the state tree file to 0600
grail_scene33_file.chmod(0o600)
# The file should exist, copy it
shutil.copyfile(str(grail_scene33_file), str(name))
shutil.copymode(str(grail_scene33_file), str(name))
# The initial mode of the fail should not match the mode we want
assert stat.S_IMODE(name.stat().st_mode) != mode
# Regardless if the file was replaced or not, the mode should be updated
ret = file.managed(
name=str(name), mode=oct(mode), replace=replace, source="salt://grail/scene33"
)
assert ret.result is True
assert stat.S_IMODE(name.stat().st_mode) == mode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit_file_permission(request, app=None, priv=None):\n pass",
"def chmod_file ( self, fspath ):\n return",
"def test_provider_system_hook_file_chmod(change_dir, fix_file_perms):\n tackle(context_file='chmod.yaml', no_input=True)\n assert oct(os.stat('tackle.yaml').st_mode)[-3:] == \"600\"",
"def fix_file_perms():\n yield\n os.chmod('tackle.yaml', int('0o644', 8))",
"def _make_writeable(filename):\n import stat\n if sys.platform.startswith('java'):\n # On Jython there is no os.access()\n return\n if not os.access(filename, os.W_OK):\n st = os.stat(filename)\n new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)",
"def chmod(self, path, mod):\n self._call(\"SETPERMISSION\", method=\"put\", path=path, permission=mod)",
"def make_writeable(filename):\n if sys.platform.startswith('java'):\n # On Jython there is no os.access()\n return\n if not os.access(filename, os.W_OK):\n stt = os.stat(filename)\n new_permissions = stat.S_IMODE(stt.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)",
"def replaceFileAtomic(source_path, dest_path):\n\n if python_version >= 0x300:\n os.replace(source_path, dest_path)\n else:\n importFromInlineCopy(\"atomicwrites\", must_exist=True).replace_atomic(\n source_path, dest_path\n )",
"def set_file_immutable_unlink(path):\n\n return vserver.set_file_attr(path, {'immutable':True, 'iunlink':True})",
"def _make_writeable(filename):\n if not os.access(filename, os.W_OK):\n st = os.stat(filename)\n new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)",
"def file_perms( fname, permissions, remote=None ):\n if remote == None:\n if perms.i_own( fname ):\n if type(permissions) == type(''):\n perms.apply_chmod( fname, permissions )\n else:\n # assume 'permissions' is a tuple or list\n perms.apply_chmod( fname, *permissions )\n else:\n if remote.x_i_own( fname ):\n if type(permissions) == type(''):\n remote.x_apply_chmod( fname, permissions )\n else:\n # assume 'permissions' is a tuple or list\n remote.x_apply_chmod( fname, *permissions )",
"def test_make_file_read_only():\n\n filename = os.path.join(tempfile.gettempdir(), \"jade-test-file.txt\")\n if os.path.exists(filename):\n os.chmod(filename, stat.S_IWRITE)\n os.remove(filename)\n\n with open(filename, \"w\") as f:\n f.write(\"Hello World\")\n\n prev_mode = os.stat(filename)\n make_file_read_only(filename)\n # Disabling because it doesn't work on Windows.\n # s = os.stat(filename)\n # assert s.st_mode != prev_mode\n # assert s.st_mode == 33060\n\n if os.path.exists(filename):\n os.chmod(filename, stat.S_IWRITE)\n os.remove(filename)",
"def update_chmod(self):\n pass",
"def edit(self, name, file):\n # Check that the file exists on s3 before proceeding\n if not self.exists_on_s3(name, file):\n print \"[-] File does not exist on s3 or role permissions are incorrect.\"\n return\n\n # Grab the data key from IAM\n decrypted_key = self._get_data_key(name)\n\n # store the key in a temporary file in /dev/shm for working with the encrypted file.\n key_file = \"/dev/shm/\" + name + \".tmp.key\"\n try:\n key_file_out = open(key_file, \"w\")\n except:\n print \"[-] Error creating temp data key file {0}\".format(key_file)\n return\n\n key_file_out.write(decrypted_key)\n os.chmod(key_file, 0600)\n key_file_out.close()\n\n # Download the file from s3 to /dev/shm\n file_name = self.download_from_s3(name, file)\n os.chmod(file_name, 0600)\n\n # Decrypt the file before editing\n decrypted_file_name = self.decrypt_file(file_name, decrypted_key)\n\n # Call $EDITOR to edit the file.\n EDITOR = os.environ.get('EDITOR','vim')\n call([EDITOR, decrypted_file_name])\n\n # Encrypt and upload the file back to s3\n self.upload(name, decrypted_file_name)\n\n # Clean up any other files laying around\n self.secure_delete(file_name, passes=10)\n self.secure_delete(decrypted_file_name, passes=10)\n self.secure_delete(key_file, passes=10)\n\n return",
"def test_managed_contents(file, tmp_path, name, contents):\n name = tmp_path / \"managed-{}\".format(name)\n ret = file.managed(name=str(name), contents=contents)\n assert ret.result is True\n assert \"diff\" in ret.changes\n assert name.exists()",
"def set_file_owner_perm(path, permission, user, group):\n uid = pwd.getpwnam(user).pw_uid\n gid = grp.getgrnam(group).gr_gid\n\n current_perm = get_permissions(path)\n try:\n logger.debug('Current permission: {0}, changing to {1}'.format(current_perm, oct(permission)))\n os.chmod(path, permission)\n os.chown(path, uid, gid)\n except Exception as e:\n logger.warning('Unable to change permissions on {0}: {1}'.format(path, e))",
"def set_file_permissions(host, fqpath, perms):\n command = \"chmod %s %s\" % (perms, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chmod failed: %s' % rerr)\n return False",
"def set_permissions(self, object, replace=False):\r\n if isinstance(self.config.origin, S3Origin):\r\n if self.config.origin.origin_access_identity:\r\n id = self.config.origin.origin_access_identity.split('/')[-1]\r\n oai = self.connection.get_origin_access_identity_info(id)\r\n policy = object.get_acl()\r\n if replace:\r\n policy.acl = ACL()\r\n policy.acl.add_user_grant('READ', oai.s3_user_id)\r\n object.set_acl(policy)\r\n else:\r\n object.set_canned_acl('public-read')",
"def change_permissions(path, permission='777'):\r\n if os.path.exists(path):\r\n subprocess.call('chmod -R %s %s'%(permission,path),shell=True)\r\n else:\r\n raise NameError('invalid path %s'% path)",
"def test_provider_system_hook_file_remove(change_dir, fix_file_perms):\n o = tackle(context_file='remove.yaml', no_input=True)\n assert o['if_file']\n assert not o['not_file']\n assert o['if_files']\n assert not o['not_files']",
"def make_readonly(path):\n mode = Path.stat(path).st_mode\n Path.chmod(path, mode & ~stat.S_IWRITE)",
"def writable(path):",
"def del_ro(action, name, exc):\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)",
"def chmod_file(filename, permissions, sudo=True):\n LOG.info(\"Changing file permissions for {}\".format(filename))\n cmd = \"chmod {} {}\".format(permissions, filename)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)",
"def can_manage(self, filename):\n return False",
"def replace_file(new_content, current_location):\r\n\tif should_replace(new_content, current_location):\r\n\t\tabs_path = os.path.abspath(current_location)\r\n\t\tcurrent_dir, filename = os.path.split(abs_path)\r\n\t\ttmp_filename = '{0}.{1}'.format(filename, time.time())\r\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\r\n\r\n\t\ttry:\r\n\t\t\twith open(tmp_path, 'w') as tmp:\r\n\t\t\t\ttmp.write(new_content.getvalue())\r\n\t\t\tos.rename(tmp_path, abs_path)\t\r\n\t\texcept IOError:\r\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\r\n\t\t\treturn False\r\n\t\treturn True\r\n\treturn False",
"def _ensure_read_write_access(tarfileobj):\n dir_perm = tarfile.TUREAD | tarfile.TUWRITE | tarfile.TUEXEC\n file_perm = tarfile.TUREAD | tarfile.TUWRITE\n\n for tarinfo in tarfileobj.getmembers():\n tarinfo.mode |= (dir_perm if tarinfo.isdir() else file_perm)",
"def manage_files(self):\n return 1 << 2",
"def replace_file(filename, contents):\n filename = path.join(PATH_ROOT, filename)\n filename_bak = \"%s.release.bak\" % filename\n os.rename(filename, filename_bak)\n with open(filename, \"w\") as out_file:\n out_file.write(\"\".join(contents))\n shutil.copymode(filename_bak, filename)\n os.remove(filename_bak)",
"def put(self, resource_id, file_id):\n v = APIValidator()\n if not v.validate(request.json, file_schema):\n abort(\n 400,\n message=\"Bad request\",\n status=400,\n errors=map(lambda x: dict(\n message=x,\n code=error_codes[\"validation_error\"]\n ), v.errors),\n )\n\n d = Deposition.get(resource_id, user=current_user)\n df = d.get_file(file_id)\n\n if not d.type.authorize_file(d, df, 'update_metadata'):\n raise ForbiddenAction('update_metadata', df)\n\n new_name = secure_filename(request.json['filename'])\n if new_name != request.json['filename']:\n abort(\n 400,\n message=\"Bad request\",\n status=400,\n errors=[dict(\n message=\"Not a valid filename\",\n code=error_codes[\"validation_error\"]\n )],\n )\n\n df.name = new_name\n d.save()\n\n return d.type.marshal_file(df)"
] | [
"0.69003147",
"0.6608834",
"0.64237905",
"0.61870056",
"0.6163651",
"0.61197066",
"0.6103727",
"0.6009532",
"0.5986581",
"0.5952005",
"0.5944114",
"0.5904253",
"0.58546925",
"0.5817923",
"0.5791194",
"0.5735725",
"0.5721977",
"0.56830996",
"0.5677009",
"0.5665991",
"0.5662682",
"0.5662606",
"0.5641098",
"0.5634327",
"0.5630057",
"0.56244826",
"0.5617145",
"0.5611392",
"0.559657",
"0.5557399"
] | 0.69366336 | 0 |
Test to ensure we can render grains data into a managed file. | def test_managed_file_with_grains_data(file, tmp_path, state_tree, minion_id):
name = tmp_path / "grains-get-contents.txt"
tmpl_contents = """
{{ salt['grains.get']('id') }}
"""
with pytest.helpers.temp_file("grainsget.tmpl", tmpl_contents, state_tree):
ret = file.managed(
name=str(name), source="salt://grainsget.tmpl", template="jinja"
)
assert ret.result is True
assert name.is_file()
assert name.read_text().strip() == minion_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_visualisations_get_visualisation_render_data(self):\n pass",
"def test_import_snapshot_mapped(self, status):\n object_generator = ObjectGenerator()\n _, user = object_generator.generate_person(user_role=\"Creator\")\n with factories.single_commit():\n assessment = factories.AssessmentFactory()\n control = factories.ControlFactory()\n # pylint: disable=expression-not-assigned\n self._create_snapshots(assessment.audit, [control])[0]\n assessment.add_person_with_role_name(user, \"Verifiers\")\n assessment.status = status\n db.session.commit()\n\n response = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Assessment\"),\n (\"Code*\", assessment.slug),\n (\"Map:control versions\", control.slug),\n ]))\n self._check_csv_response(response, {})\n self.assert_asmnt_notifications()",
"def test_visualisations_get_visualisation(self):\n pass",
"def testViewData(self):\n try:\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n for entryId in entryD:\n for entityId, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n\n analD = eD[\"anal_instances\"] if \"anal_instances\" in eD else {}\n for asymId, aD in analD.items():\n logger.info(\"entryId %s entityId %s asymId %s analD: %r\", entryId, entityId, asymId, aD)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_visualisations_create_visualisation_render_data_refresh_job(self):\n pass",
"def test_valid_genes_file(self):\n\n # Create a valid genes file\n valid_genes_file = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), \"data\", \"valid_genes_file.bed\")\n\n ref_name = \"ref1\"\n\n genes = {\"gene1\": {\"start\": 0, \"end\": 100},\n \"gene 2\": {\"start\": 101, \"end\": 200}, # Spaces are allowed in the gene name\n \"gene3\": {\"start\": 201, \"end\": 300}}\n\n with open(valid_genes_file, \"w+\") as f:\n for gene in genes:\n f.write(\"%s\\t%s\\t%s\\t%s\\n\" % (ref_name, genes[gene][\"start\"],\n genes[gene][\"end\"], gene))\n\n parsed_genes = parse_genes_file(valid_genes_file, ref_name)\n\n for gene in parsed_genes:\n assert gene in genes\n assert parsed_genes[gene][\"start\"] == genes[gene][\"start\"]\n assert parsed_genes[gene][\"end\"] == genes[gene][\"end\"]\n assert parsed_genes[gene][\"frame\"] == genes[gene][\"start\"] % 3\n\n os.remove(valid_genes_file)",
"def test_snapshot(model):\n with open(join(DATA_PATH, \"store.json\")) as file_handle:\n validator = Draft4Validator(json.load(file_handle))\n code, result = memote.test_model(\n model=model, results=True, pytest_args=[\"--tb\", \"no\"])\n assert validator.is_valid(result)\n config = memote.ReportConfiguration.load()\n report = memote.SnapshotReport(result=result, configuration=config)\n obj = report.render_json()\n assert validator.is_valid(obj)",
"def test_compute_glycemic_load(self):\n pass",
"def test_good_load(self):\n self.r0.save_to_file([self.r0, self.r1])\n objs = self.r0.load_from_file()\n self.assertEqual(str(objs[0]), '[Rectangle] (1) 0/0 - 2/3')\n self.assertEqual(str(objs[1]), '[Rectangle] (2) 0/0 - 4/6')",
"def test_visualize_equipment(self):\n pass",
"def test_alien_data(self):",
"def check(self):\n # get the data from shotgun\n app = self.parent.app\n context = app.context\n # get asset type\n filters = [[\"id\", \"is\", context.entity[\"id\"]]]\n fields = [\"sg_asset_type\"]\n assetType = app.shotgun.find_one(\n \"Asset\", filters=filters, fields=fields)[\"sg_asset_type\"]\n # get step short name\n filters = [[\"id\", \"is\", context.step[\"id\"]]]\n fields = [\"short_name\"]\n stepShortName = app.shotgun.find_one(\n \"Step\", filters=filters, fields=fields)[\"short_name\"]\n\n try:\n assetNode = gNodes.getTopGNode()\n except:\n assetNode = None\n\n if assetNode:\n metadataCode = assetNode.grid_code.get()\n metadataAssetType = assetNode.grid_type.get(asString=True)\n metadataPipeStep = assetNode.grid_pipeStep.get(asString=True)\n if not (assetType == metadataAssetType and\n stepShortName == metadataPipeStep and\n context.entity[\"name\"] == metadataCode):\n self.status = self.errorMode\n self.addError(\"Context and asset node metadata don't match\")\n self.errorMessage = \"Context and asset node metadata don't match\"\n else:\n self.status = \"OK\"\n else:\n self.status = \"OK\"",
"def test_drugs_get(self):\n pass",
"def testViewCoverageData(self):\n try:\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n covRefDbList = []\n covSampleList = []\n entryCountD = {}\n for entryId in entryD:\n for _, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n\n analD = eD[\"anal_instances\"] if \"anal_instances\" in eD else {}\n\n for _, aD in analD.items():\n entryCountD[entryId] = True\n covRefDb = aD[\"coverage_inst_refdb\"]\n covSample = aD[\"coverage_inst_entity\"]\n if covRefDb is not None:\n covRefDb = 0.0 if covRefDb < 0.0 else covRefDb\n covRefDb = 1.0 if covRefDb > 1.0 else covRefDb\n covRefDbList.append(covRefDb)\n if covSample is not None:\n covSample = 0.0 if covSample < 0.0 else covSample\n covSample = 1.0 if covSample > 1.0 else covSample\n covSampleList.append(covSample)\n #\n logger.info(\"covRefDbList %d covSampleList %d\", len(covRefDbList), len(covSampleList))\n #\n cu = DisorderChartUtils()\n cu.doHistogramChart(\n covRefDbList,\n plotPath=self.__plotCoverageRefDb,\n yPlotScale=\"log\",\n yPlotMax=100000,\n yPlotMin=1000,\n xPlotMin=0.0,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Reference Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageRefDb,\n \"UniProt reference sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \"\n % (len(covRefDbList), len(entryCountD)),\n )\n cu.doHistogramChart(\n covSampleList,\n plotPath=self.__plotCoverageSample1,\n yPlotScale=\"log\",\n xPlotMin=0.0,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n yPlotMax=100000,\n yPlotMin=1000,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Sample Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageSample1,\n \"Sample sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \" % (len(covSampleList), len(entryCountD)),\n )\n #\n cu.doHistogramChart(\n covSampleList,\n plotPath=self.__plotCoverageSample2,\n yPlotScale=\"log\",\n yPlotMax=100000,\n yPlotMin=1000,\n xPlotMin=0.8,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Sample Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageSample1,\n \"Sample sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \" % (len(covSampleList), len(entryCountD)),\n )\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_visualisations_get_visualisation_render_data_refresh_job(self):\n pass",
"def test_geo_data_created(self):\n # Currently, there are no GeometryStore or PointGeometry objects in the database\n self.assertEqual(GeometryStore.objects.count(), 0)\n self.assertEqual(PointGeometry.objects.count(), 0)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n\n # GeometryStore objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's GeometryStore\n self.assertEqual(GeometryStore.objects.count(), 3)\n # PointGeometry objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's PointGeometry\n self.assertEqual(PointGeometry.objects.count(), 3)\n # The powerplant_ouessant point is correct\n powerplant_ouessant_points = powerplant_ouessant.geo.points.all()\n self.assertEqual(powerplant_ouessant_points.count(), 1)\n self.assertEqual(powerplant_ouessant_points.first().geom.x, -5.11121)\n self.assertEqual(powerplant_ouessant_points.first().geom.y, 48.43754)\n # The powerplant_ilarionas point is correct\n powerplant_ilarionas_points = powerplant_ilarionas.geo.points.all()\n self.assertEqual(powerplant_ilarionas_points.count(), 1)\n self.assertEqual(powerplant_ilarionas_points.first().geom.x, 21.8039)\n self.assertEqual(powerplant_ilarionas_points.first().geom.y, 40.0966)\n # The project_liaoning gets its geodata from its latitude and longitude\n # cells\n project_liaoning_points = project_liaoning.geo.points.all()\n self.assertEqual(project_liaoning_points.count(), 1)\n self.assertEqual(project_liaoning_points.first().geom.x, 121.38065)\n self.assertEqual(project_liaoning_points.first().geom.y, 41.16469)\n # For the project_ouessant1 and project_ouessant2, the latitude and\n # longitude cells are blank, so they get their geodata from their\n # parent PowerPlant (powerplant_ouessant).\n self.assertEqual(project_ouessant1.geo, project_ouessant1.power_plant.geo)\n self.assertEqual(project_ouessant2.geo, project_ouessant2.power_plant.geo)\n # The powerplant_tonstad has no geo data\n self.assertIsNone(powerplant_tonstad.geo)",
"def test_households_in_admin_unit(self):",
"def testViewEntityData(self):\n try:\n iCountH = 0\n iCountB = 0\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n for entryId, _ in entryD.items():\n if not self.__matchEntry(entryId, resLimit=1.2):\n continue\n resV = self.__entryResD[entryId][\"ls_d_res_high\"]\n for entityId, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n if self.__matchEntity(eD, taxId=9606):\n taxId = eD[\"ncbi_taxonomy_id\"] if \"ncbi_taxonomy_id\" in eD else None\n logger.debug(\"%s %s selected %.2f taxId %r\", entryId, entityId, resV, taxId)\n iCountH += 1\n if self.__matchEntity(eD, taxType=\"Bacteria\"):\n sn = eD[\"ncbi_scientific_name\"] if \"ncbi_scientific_name\" in eD else None\n logger.info(\"%s %s selected %.2f sn %r\", entryId, entityId, resV, sn)\n iCountB += 1\n #\n logger.info(\"Homo sapiens %d bacteria %d\", iCountH, iCountB)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def base_data_check_shot(self):\n\n #alembic_dir\n alembic_dir = self.alembic_functionality.get_parm_value(self.node, 'alembic_dir')\n \n #is False\n if not (alembic_dir):\n #log\n self.logger.debug('Parameter alembic dir empty.')\n return False\n\n #dir exists\n if not (os.path.isdir(alembic_dir)):\n #log\n self.logger.debug('Alembic dir {0} does not exist.'.format(alembic_dir))\n return False\n\n\n #alembic_path_list\n alembic_path_list = [os.path.join(alembic_dir, file).replace('\\\\', '/') for \n file in \n os.listdir(alembic_dir) if \n (os.path.isfile(os.path.join(alembic_dir, file)) and file.split('.')[-1] == 'abc')]\n #alembic_path_list empty\n if not (alembic_path_list):\n #log\n self.logger.debug('alembic_path_list empty. Alembic dir {0} does not seem to contain alembic files.'.format(alembic_dir))\n return False\n\n\n #checked_alembic_path_list\n checked_alembic_path_list = []\n\n #iterate\n for alembic_path in alembic_path_list:\n\n #object_path_list\n object_path_list = self.alembic_functionality.get_alembic_object_path_list(alembic_path)\n #object_path_list empty\n if not (object_path_list):\n #log\n self.logger.debug('Object path list for alembic {0} empty. Continuing'.format(alembic_path))\n continue\n\n #iterate, check and create\n for object_path in object_path_list:\n\n #helga_locator_attr_exists\n helga_locator_attr_exists = self.alembic_functionality.alembic_attribute_exists(alembic_path, object_path, 'helga_locator')\n\n #helga_highpoly_rendergeo_attr_exists\n helga_highpoly_rendergeo_attr_exists = self.alembic_functionality.alembic_attribute_exists(alembic_path, object_path, 'helga_highpoly_rendergeo')\n\n #if attr exists append and break\n if (helga_locator_attr_exists and helga_highpoly_rendergeo_attr_exists):\n\n #append\n checked_alembic_path_list.append(alembic_path)\n break\n\n #checked_alembic_path_list empty\n if not (checked_alembic_path_list):\n #log\n self.logger.debug('checked_alembic_path_list empty. Alembic dir {0} does not seem to contain alembic files with helga_highpoly_rendergeo attribute.'.format(alembic_dir))\n return False\n\n\n #alembic_highpoly_rendergeo_dir\n alembic_highpoly_rendergeo_dir = self.alembic_functionality.get_parm_value(self.node, 'alembic_highpoly_rendergeo_dir')\n \n #is False\n if not (alembic_highpoly_rendergeo_dir):\n #log\n self.logger.debug('Parameter alembic highpoly rendergeo dir empty.')\n return False\n\n #dir exists\n if not (os.path.isdir(alembic_highpoly_rendergeo_dir)):\n #log\n self.logger.debug('Alembic highpoly rendergeo dir {0} does not exist.'.format(alembic_highpoly_rendergeo_dir))\n return False\n\n\n #return\n return [checked_alembic_path_list, alembic_highpoly_rendergeo_dir]",
"def test_data_object_vaporise(self):\n pass",
"def test_gds_map_file(self) -> None:\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n def add_gds_map(in_dict: Dict[str, Any]) -> Dict[str, Any]:\n out_dict = deepdict(in_dict)\n out_dict.update({\"gds map file\": \"test/gds_map_file\"})\n return out_dict\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, add_gds_map)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n tool = DummyTool()\n tool.technology = tech\n database = hammer_config.HammerDatabase()\n tool.set_database(database)\n\n # Test that empty for gds_map_mode results in no map file.\n database.update_project([{\n 'par.inputs.gds_map_mode': 'empty',\n 'par.inputs.gds_map_file': None\n }])\n self.assertEqual(tool.get_gds_map_file(), None)\n\n # Test that manual mode for gds_map_mode works.\n database.update_project([{\n 'par.inputs.gds_map_mode': 'manual',\n 'par.inputs.gds_map_file': '/tmp/foo/bar'\n }])\n self.assertEqual(tool.get_gds_map_file(), '/tmp/foo/bar')\n\n # Test that auto mode for gds_map_mode works if the technology has a map file.\n database.update_project([{\n 'par.inputs.gds_map_mode': 'auto',\n 'par.inputs.gds_map_file': None\n }])\n self.assertEqual(tool.get_gds_map_file(), '{tech}/gds_map_file'.format(tech=tech_dir))\n\n # Cleanup\n shutil.rmtree(tech_dir_base)\n\n # Create a new technology with no GDS map file.\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n HammerToolTestHelpers.write_tech_json(tech_json_filename)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n tool.technology = tech\n\n # Test that auto mode for gds_map_mode works if the technology has no map file.\n database.update_project([{\n 'par.inputs.gds_map_mode': 'auto',\n 'par.inputs.gds_map_file': None\n }])\n self.assertEqual(tool.get_gds_map_file(), None)\n\n # Cleanup\n shutil.rmtree(tech_dir_base)",
"def test_visualize_fallback(self):\n mol = Molecule().from_smiles(\"CCO\")\n with pytest.warns(UserWarning):\n mol.visualize(backend=\"rdkit\")",
"def test_visualize_fallback(self):\n mol = Molecule().from_smiles(\"CCO\")\n with pytest.warns(UserWarning):\n mol.visualize(backend=\"rdkit\")",
"def test_gethtml(self):\r\n mock_module = CHModuleFactory.create()\r\n\r\n def fake_get_display_items():\r\n \"\"\"\r\n A mock of get_display_items\r\n \"\"\"\r\n return [FakeChild()]\r\n mock_module.get_display_items = fake_get_display_items\r\n out_html = mock_module.render('student_view').content\r\n self.assertTrue('This is supposed to be test html.' in out_html)\r\n self.assertTrue('i4x://this/is/a/fake/id' in out_html)",
"def test_visualize_recipe_nutrition(self):\n pass",
"def test_process_data():\n afos_dump.process_data(\"\")",
"def test_plate_size_error():\n \n test_object = fa.read_in_envision(data_csv=plate_1, platemap_csv=plate_map_file, data_type='plate', size=100)",
"def test_validate_ingest(self):\n #Validate schema and config file\n ingest_mgmr = IngestManager()\n response = ingest_mgmr.validate_config_file(self.example_config_data)\n assert (response is True)\n\n #Validate properties\n response = ingest_mgmr.validate_properties()\n assert (response is True)",
"def test_read_image(self):\n pass",
"def test_generate_sample_sheet(self):\n pass"
] | [
"0.59791505",
"0.5967556",
"0.572201",
"0.56537753",
"0.56120425",
"0.56058466",
"0.5579406",
"0.5568751",
"0.55665904",
"0.55644566",
"0.5499739",
"0.5443593",
"0.54322785",
"0.54284906",
"0.53880054",
"0.5371322",
"0.53619367",
"0.5352588",
"0.5329219",
"0.53169596",
"0.5316755",
"0.5309828",
"0.5309828",
"0.5301884",
"0.5289238",
"0.5285729",
"0.52819854",
"0.5259828",
"0.5257904",
"0.52504694"
] | 0.62238955 | 0 |
Tests to ensure that file.managed creates directories with the permissions requested with the dir_mode argument | def test_managed_dir_mode(file, tmp_path, grail_scene33_file):
desired_mode = 0o777
name = tmp_path / "a" / "managed_dir_mode_test_file"
ret = file.managed(
name=str(name),
source="salt://grail/scene33",
mode="600",
makedirs=True,
dir_mode=oct(desired_mode), # 0777
)
assert ret.result is True
# Sanity check. File exists and contents match
assert name.exists()
assert name.read_text() == grail_scene33_file.read_text()
# Now the real test, the created directories mode match
resulting_mode = stat.S_IMODE(name.parent.stat().st_mode)
assert resulting_mode == desired_mode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_mkdir(self, client, remote_temp_dir):\n\n dir_path = posixpath.join(remote_temp_dir, \"subdir\")\n assert not client.exists(dir_path)\n\n with HdfsHook() as hook:\n hook.mkdir(dir_path, mode=0o750)\n\n assert client.exists(dir_path)\n assert client.info(dir_path)[\"permissions\"] == 0o750",
"def test_makedirs(self, client, remote_temp_dir):\n\n dir_path = posixpath.join(remote_temp_dir, \"some\", \"nested\", \"dir\")\n\n with HdfsHook() as hook:\n hook.makedirs(dir_path, mode=0o750)\n\n assert client.exists(dir_path)\n assert client.info(dir_path)[\"permissions\"] == 0o750",
"def test_directory_world_accessible(self):\n if os.name == \"nt\":\n self.skipTest(\"Windows does not use POSIX-style permissions.\")\n os.rmdir(self.info_dir)\n # The default umask is typically 0o022, in which case this test is\n # nontrivial. In the unlikely case that the umask is 0o000, we'll\n # still be covered by the \"restrictive umask\" test case below.\n manager.write_info_file(_make_info())\n self.assertMode(self.info_dir, 0o777)\n self.assertEqual(self._list_info_dir(), [\"pid-76540.info\"])",
"def _make_directory_unlocked(full_path, uid, gid, metadata=None):\n try:\n os.mkdir(full_path)\n except OSError as err:\n if err.errno == errno.ENOENT:\n # Tell the caller some directory of the parent path does not\n # exist.\n return False, metadata\n elif err.errno == errno.EEXIST:\n # Possible race, in that the caller invoked this method when it\n # had previously determined the file did not exist.\n #\n # FIXME: When we are confident, remove this stat() call as it is\n # not necessary.\n try:\n stats = os.stat(full_path)\n except OSError as serr:\n # FIXME: Ideally we'd want to return an appropriate error\n # message and code in the PUT Object REST API response.\n raise DiskFileError(\"_make_directory_unlocked: os.mkdir failed\"\n \" because path %s already exists, and\"\n \" a subsequent os.stat on that same\"\n \" path failed (%s)\" % (full_path,\n str(serr)))\n else:\n is_dir = stat.S_ISDIR(stats.st_mode)\n if not is_dir:\n # FIXME: Ideally we'd want to return an appropriate error\n # message and code in the PUT Object REST API response.\n raise DiskFileError(\"_make_directory_unlocked: os.mkdir\"\n \" failed on path %s because it already\"\n \" exists but not as a directory\" % (\n full_path))\n return True, metadata\n elif err.errno == errno.ENOTDIR:\n # FIXME: Ideally we'd want to return an appropriate error\n # message and code in the PUT Object REST API response.\n raise DiskFileError(\"_make_directory_unlocked: os.mkdir failed\"\n \" because some part of path %s is not in fact\"\n \" a directory\" % (full_path))\n elif err.errno == errno.EIO:\n # Sometimes Fuse will return an EIO error when it does not know\n # how to handle an unexpected, but transient situation. It is\n # possible the directory now exists, stat() it to find out after a\n # short period of time.\n _random_sleep()\n try:\n stats = os.stat(full_path)\n except OSError as serr:\n if serr.errno == errno.ENOENT:\n errmsg = \"_make_directory_unlocked: os.mkdir failed on\" \\\n \" path %s (EIO), and a subsequent os.stat on\" \\\n \" that same path did not find the file.\" % (\n full_path,)\n else:\n errmsg = \"_make_directory_unlocked: os.mkdir failed on\" \\\n \" path %s (%s), and a subsequent os.stat on\" \\\n \" that same path failed as well (%s)\" % (\n full_path, str(err), str(serr))\n raise DiskFileError(errmsg)\n else:\n # The directory at least exists now\n is_dir = stat.S_ISDIR(stats.st_mode)\n if is_dir:\n # Dump the stats to the log with the original exception.\n logging.warn(\"_make_directory_unlocked: os.mkdir initially\"\n \" failed on path %s (%s) but a stat()\"\n \" following that succeeded: %r\" % (full_path,\n str(err),\n stats))\n # Assume another entity took care of the proper setup.\n return True, metadata\n else:\n raise DiskFileError(\"_make_directory_unlocked: os.mkdir\"\n \" initially failed on path %s (%s) but\"\n \" now we see that it exists but is not\"\n \" a directory (%r)\" % (full_path,\n str(err),\n stats))\n else:\n # Some other potentially rare exception occurred that does not\n # currently warrant a special log entry to help diagnose.\n raise DiskFileError(\"_make_directory_unlocked: os.mkdir failed on\"\n \" path %s (%s)\" % (full_path, str(err)))\n else:\n if metadata:\n # We were asked to set the initial metadata for this object.\n metadata_orig = get_object_metadata(full_path)\n metadata_orig.update(metadata)\n write_metadata(full_path, metadata_orig)\n metadata = metadata_orig\n\n # We created it, so we are reponsible for always setting the proper\n # ownership.\n 
do_chown(full_path, uid, gid)\n return True, metadata",
"def validateDirectory(dir, mode=0755, noExceptionRaise=False):\n\n if os.path.isdir(dir):\n if os.access(dir, 7): return 1\n else: return None\n else:\n try:\n os.makedirs(dir, mode)\n os.chmod(dir, mode)\n except:\n if noExceptionRaise: pass\n else: raise\n return 1",
"def test_ensure_dir_exists(self):\n pass",
"def ensure_dir(self, *args):\n return self.ensure(*args, **{\"dir\": True})",
"def _make_dirs(filepath, mode):\n parent = filepath.parent\n if \"w\" in mode and parent:\n os.makedirs(parent, exist_ok=True)",
"def test_permissions(self):\n self.assertEqual(dir_perm, 0o2750)\n self.assertEqual(file_perm, 0o0440)",
"def ensuredir(dpath, mode=0o1777):\n if isinstance(dpath, (list, tuple)): # nocover\n dpath = join(*dpath)\n if not exists(dpath):\n try:\n os.makedirs(normpath(dpath), mode=mode)\n except OSError: # nocover\n raise\n return dpath",
"def test_isdir(self, remote_mock_dir):\n\n with HdfsHook() as hook:\n assert hook.isdir(posixpath.join(remote_mock_dir, \"subdir\"))\n assert not hook.isdir(posixpath.join(remote_mock_dir, \"test.txt\"))",
"def check_sane(self):\n st = os.stat(self.path)\n if st.st_uid != os.getuid():\n raise Exception('Auth dir %s not owned by user %d.' % (\n self.path, os.getuid()))\n # Mode 16832 is equal to (stat.S_IFDIR | stat.S_IRWXU)\n # In other words, a directory with mode bits rwx------\n if st.st_mode != 16832:\n raise Exception('Auth dir %s not a dir or wrong permissions.' % self.path)",
"def ensure_directory(self, name, dest, mode=0777):\n self.m.path.assert_absolute(dest)\n self._run(\n name, ['ensure-directory', '--mode', oct(mode), dest])\n self.m.path.mock_add_paths(dest)",
"def ensure_dir(dir_):\n try:\n os.mkdir(dir_)\n except OSError:\n assert os.path.isdir(dir_)",
"def testSetDirectoryTreeExecutable(self):\n with tempfile.TemporaryDirectory() as temp_dir:\n subdir = os.path.join(temp_dir, \"subdir\")\n file_path = os.path.join(subdir, \"file\")\n os.makedirs(subdir)\n with open(file_path, \"w\"):\n pass\n utils.SetDirectoryTreeExecutable(temp_dir)\n self.assertEqual(os.stat(file_path).st_mode & 0o777, 0o755)",
"def open_and_force_mkdir(path, mode):\n force_mkdir(os.path.dirname(path))\n return open(path, mode)",
"def test_nested_directories(self):\n filesystem = {\n '/a/a/a': '',\n '/a/a/b': '',\n '/a/b/a': '',\n '/a/b/b': '',\n '/b/a/a': '',\n '/b/a/b': '',\n '/b/b/a': '',\n '/b/b/b': '',\n }\n self.mfs.add_entries(filesystem)\n\n for path in filesystem:\n self.assertTrue(os.path.isdir(os.path.dirname(path)))\n self.assertTrue(os.path.exists(path))\n self.assertTrue(os.path.isfile(path))",
"def test_make_directory_read_only(mock_make_file_read_only):\n\n tmpdir = os.path.join(tempfile.gettempdir(), \"jade-test-tmp87alkj8ew\")\n os.makedirs(tmpdir, exist_ok=True)\n\n tmpfile = os.path.join(tmpdir, \"jade-test-file.txt\")\n with open(tmpfile, \"w\") as f:\n f.write(\"Hello World\")\n\n make_directory_read_only(tmpdir)\n mock_make_file_read_only.assert_called()\n\n if os.path.exists(tmpdir):\n shutil.rmtree(tmpdir)",
"def createDirectory(self, summary_handle,directory,mode,role =\"\",summary_var_dict={}):\n if role:\n directory = directory + \"/\" + role\n \n tmp_var = \"mkdir -p %s%s%s\" %(directory,self,role)\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n return\n\n self.pushMode(CLI_MODES.shell)\n if role:\n self.removePath(directory)\n\n logger.info (\"Directory is %s\" %directory)\n output = self.sendCmd(\"mkdir -p %s\" % directory)\n status = self.command_execution_status()\n if status == \"true\":\n summary_handle.write(\"mkdir -p %s,%s,%s,pass \\n\" %(directory,self,role))\n else:\n summary_handle.write(\"mkdir -p %s,%s,%s,fail \\n\" %(directory,self,role)) \n\n self.popMode()\n return output",
"def create_dir():\n if check_dir_exist():\n return False\n else:\n os.makedirs(path_structure)\n return True",
"def test_mkdir_exists(self, client, remote_temp_dir):\n\n dir_path = posixpath.join(remote_temp_dir, \"subdir\")\n assert not client.exists(dir_path)\n\n with HdfsHook() as hook:\n hook.mkdir(dir_path, exist_ok=False)\n\n with pytest.raises(IOError):\n hook.mkdir(dir_path, exist_ok=False)\n\n hook.mkdir(dir_path, exist_ok=True)",
"def test_create_package_dir(self):\n tempdir = tempfile.mkdtemp()\n os.rmdir(tempdir)\n settings = {\n 'storage.dir': tempdir,\n }\n FileStorage.configure(settings)\n try:\n self.assertTrue(os.path.exists(tempdir))\n finally:\n os.rmdir(tempdir)",
"def test_additional_folders(host, path):\n\n current_dir = host.file(path)\n\n assert current_dir.exists\n assert current_dir.is_directory\n assert current_dir.user == 'www-data'\n assert current_dir.group == 'www-data'\n assert current_dir.mode == 0o700",
"def test_makedirs_exists(self, client, remote_temp_dir):\n\n dir_path = posixpath.join(remote_temp_dir, \"some\", \"nested\", \"dir\")\n\n with HdfsHook() as hook:\n hook.makedirs(dir_path, exist_ok=False)\n\n with pytest.raises(IOError):\n hook.makedirs(dir_path, exist_ok=False)\n\n hook.makedirs(dir_path, exist_ok=True)",
"def test_remote_create_directory_invalid(sftp,capsys):\n\tcases = [\n\t\t\"http://www.google.com\",\n\t\t\"\",\n\t\t\".\",\n\t\t\"/test_create_directory_invalid\"\n\t]\n\tfor dir_test in cases:\n\t\t# Each case should fail and print an error message.\n\t\tassert mkdirectory.create_dir_remote(sftp, dir_test) is False\n\t\tassert mkdirectory.ERROR_PREFACE in capsys.readouterr().out",
"def permissions_check(\n basedir='.',\n verbose_level=0,\n):\n # File permissions on Cygwin/Windows filesystems don't work the\n # same way as Linux. Don't try to change them.\n # TODO(dittrich): Is there a Better way to handle perms on Windows?\n fs_type = get_fs_type(basedir)\n if fs_type in ['NTFS', 'FAT', 'FAT32']:\n msg = (\n f\"[-] {basedir} has file system type '{fs_type}': \"\n \"skipping permissions check\"\n )\n logger.info(msg)\n return\n any_other_perms = stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH\n for root, dirs, files in os.walk(basedir, topdown=True):\n for name in files:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n f\"[!] file '{path}' is mode {oct(perms)}\",\n file=sys.stderr\n )\n except OSError:\n pass\n for name in dirs:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n (\n f\"[!] directory '{path}' is mode \"\n f\"{oct(perms)}\"\n ),\n file=sys.stderr\n )\n except OSError:\n pass",
"def setUp(self):\n tempDir.safe_mkdir(parents=True)\n os.chdir(tempDir.as_posix())",
"def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0o066)\n self.addCleanup(log1.close)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEqual(mode, 0o444)\n else:\n self.assertEqual(mode, 0o066)",
"def ensure(self, *args, **kwargs):\n p = self.join(*args)\n if kwargs.get(\"dir\", 0):\n return p._ensuredirs()\n else:\n p.dirpath()._ensuredirs()\n if not p.check(file=1):\n p.open(\"wb\").close()\n return p",
"def _dodir ( self, dirpath, mkdir_p ):\n return"
] | [
"0.6981194",
"0.6887613",
"0.66907316",
"0.6682905",
"0.66455567",
"0.6617674",
"0.6571031",
"0.6519029",
"0.64940614",
"0.6311934",
"0.6296809",
"0.6276639",
"0.62766325",
"0.62750995",
"0.6265781",
"0.6251389",
"0.6222987",
"0.6193035",
"0.617428",
"0.6125918",
"0.6120425",
"0.61136055",
"0.6112969",
"0.6107384",
"0.60934937",
"0.6087708",
"0.60767347",
"0.6069418",
"0.605192",
"0.6045287"
] | 0.7481688 | 0 |
test file.managed with contents by using the default contents_newline flag. | def test_managed_contents_with_contents_newline(file, tmp_path, contents):
name = tmp_path / "foo"
# Create a file named foo with contents as above but with a \n at EOF
ret = file.managed(name=str(name), contents=contents, contents_newline=True)
assert ret.result is True
assert name.exists()
expected = contents
if not expected.endswith("\n"):
expected += "\n"
assert name.read_text() == expected | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_managed_contents(file, tmp_path, name, contents):\n name = tmp_path / \"managed-{}\".format(name)\n ret = file.managed(name=str(name), contents=contents)\n assert ret.result is True\n assert \"diff\" in ret.changes\n assert name.exists()",
"def test_onePerLine(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])",
"def test_managed_file_issue_51208(file, tmp_path, state_tree):\n vimrc_contents = \"\"\"\n set number\n syntax on\n set paste\n set ruler\n if has(\"autocmd\")\n au BufReadPost * if line(\"'\\\"\") > 1 && line(\"'\\\"\") <= line(\"$\") | exe \"normal! g'\\\"\" | endif\n endif\n\n \"\"\"\n with pytest.helpers.temp_file(\n \"vimrc.stub\", directory=state_tree / \"issue-51208\", contents=vimrc_contents\n ) as vimrc_file:\n name = tmp_path / \"issue_51208.txt\"\n ret = file.managed(name=str(name), source=\"salt://issue-51208/vimrc.stub\")\n assert ret.result is True\n assert name.read_text() == vimrc_file.read_text()",
"def test_get_file_content(self):\n pass",
"def test_binary_contents(file, tmp_path):\n name = tmp_path / \"1px.gif\"\n ret = file.managed(name=str(name), contents=BINARY_FILE)\n assert ret.result is True",
"def test_ignoreBlanks(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\n\\n\\n\\n')\n self.assertEqual(list(inventoryReader(fp.path)), ['something'])",
"def test_read_file_from_disk_lines(self):\r\n actual_data = read_file_from_disk(self.test_file1, into_lines=True)\r\n self.assertEqual(self.test_file1_data, ''.join(actual_data))",
"def test_file_iterator_strips_newlines(self):\n for line in file_iterator('example_module.py'):\n self.assertFalse(line.endswith('\\n'))",
"def test_file_readlines(self):\n FileWriter(self.multiline_path).write(self.multiline_string)\n line_list = FileReader(self.multiline_path).readlines()\n self.assertEqual(line_list, self.multiline_list)",
"def test_binary_contents_twice(file, tmp_path):\n name = tmp_path / \"1px.gif\"\n\n # First run state ensures file is created\n ret = file.managed(name=str(name), contents=BINARY_FILE)\n assert ret.result is True\n\n # Second run of state ensures file is in correct state\n ret = file.managed(name=str(name), contents=BINARY_FILE)\n assert ret.result is True",
"def test_read_from_file():\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()",
"def test_read_file():\n filename = 'sample'\n assert read_file(filename) == 'hello!\\n'",
"def test_LogicalLines(self) -> None:\n content = \"\"\"\nfoo \\\\\nbar \\\\\nbaz\nfoo\nbling \\\\\nbling \\\\ bling\nbling\n\"\"\"\n fobj = io.StringIO(content)\n lines = LogicalLines(fobj).readlines()\n assert lines == [\n '\\n',\n 'foo bar baz\\n',\n 'foo\\n',\n 'bling bling \\\\ bling\\n',\n 'bling\\n',\n ], lines",
"def test_comments(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\n#commented\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])",
"def test_file_readlines_as_ascii(self):\n FileWriter(self.ascii_path).write(self.multiline_string)\n line_list = FileReader(self.ascii_path).readlines_as(\"ascii\")\n self.assertEqual(line_list, self.multiline_list)",
"def test_content(self):\n\n obj = FBO(\n path=TEST_FILES_ROOT,\n ).all().get(\n name='test1.md',\n )\n\n self.assertEqual(\n 'A short work by me, for you to read.\\n',\n obj.content,\n )",
"def test_open_read(self, remote_mock_dir):\n\n file_path = posixpath.join(remote_mock_dir, \"test.txt\")\n with HdfsHook() as hook:\n with hook.open(file_path) as file_:\n content = file_.read()\n assert content == b\"Test file\\n\"",
"def testReadFile(self):\n content = archive_parser.Archive.GLOBAL_SIG\n file_name = 'test_file'\n content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -\n len(file_name))\n content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH\n content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH\n content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH\n content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH\n\n message = 'test file contents'\n message_size = str(len(message))\n content += message_size + ' ' * (archive_parser.Archive.CONTENT_SIZE_LENGTH -\n len(message_size))\n content += archive_parser.Archive.END_TAG\n content += message\n archive = archive_parser.Archive(content)\n archive.Parse()\n self.assertIn(file_name, archive.files)\n self.assertEquals(archive.files[file_name], message)",
"def testWriteLines(self):\n file_writer = writers.FileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteLines([\n 'First line of text',\n 'Second line of text'])\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n expected_output_data = (\n b'First line of text\\r\\nSecond line of text\\r\\n')\n self.assertEqual(output_data, expected_output_data)",
"def target_test_file_content():\n return 'initial content'",
"def test_template_local_file(file, tmp_path, prefix):\n source = tmp_path / \"source\"\n dest = tmp_path / \"dest\"\n source.write_text(\"{{ foo }}\\n\")\n\n ret = file.managed(\n name=str(dest),\n source=\"{}{}\".format(prefix, source),\n template=\"jinja\",\n context={\"foo\": \"Hello world!\"},\n )\n assert ret.result is True\n assert dest.read_text() == \"Hello world!\\n\"",
"def test_template_local_file_noclobber(file, tmp_path):\n source = dest = tmp_path / \"source\"\n source.write_text(\"{{ foo }}\\n\")\n\n ret = file.managed(\n name=str(dest),\n source=str(source),\n template=\"jinja\",\n context={\"foo\": \"Hello world!\"},\n )\n assert ret.result is False\n assert \"Source file cannot be the same as destination\" in ret.comment",
"def test_no_eof(self):",
"def read_and_test_file_content(self): # pragma: no cover\n\n # We print the CLI header.\n PyFunceble.CLICore.print_header()\n\n with open(self.file, \"r\", encoding=\"utf-8\") as file:\n # We open the file we have to test.\n\n for line in self._get_list_to_of_subjects_to_test_from_file(file):\n # We loop through the file decoded file\n # content.\n\n # We test the line.\n self._test_line(line)\n\n for index, line in self.mining.list_of_mined():\n # We loop through the list of mined domains\n # (if the mining subystem is activated.)\n\n # We test the line.\n self._test_line(line)\n # and remove the currently tested line\n # from the mining database.\n self.mining.remove(index, line)\n\n for subject in self.get_complements():\n # We loop through the list of complements.\n\n # We test the complement.\n self._test_line(subject)\n\n # We inform all subsystem that we are not testing for complements anymore.\n self.complements_test_started = False\n\n # We update the counters\n self.autocontinue.update_counters()\n # We clean the autocontinue subsystem, we finished\n # the test.\n self.autocontinue.clean()\n # We process the autosaving if necessary.\n self.autosave.process(test_completed=True)\n # We close the database connection\n if self.sqlite_db.authorized:\n self.sqlite_db.connection.close()\n if self.mysql_db.authorized:\n self.mysql_db.get_connection().close()",
"def test_read_quotes_no_final_newline(self):\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes2.txt\")\n quotes = api.read_quotes(path)\n self.assertEqual(4, len(quotes))",
"def test_read_from_file():\n from scraper import read_from_file\n assert read_from_file(TEST_FILE) == (TEST_CONTENT, 'utf-8')",
"def test_open_file_entity(self):\n virtpath = self.path_translator.split_virtual_path(\n \"/test/search1/rien_12345\")\n self.assertTrue(self.path_translator.is_file_entity(virtpath))\n ftp_file = self.path_translator.open_cw_file(virtpath)\n expected_file_content = \"nothing in 12345\"\n self.assertEqual(expected_file_content,\n ftp_file.readChunk(0, -1))\n self.assertEqual({\n \"size\": len(expected_file_content),\n \"uid\": 0,\n \"gid\": 0,\n \"mtime\": 0,\n \"atime\": 0,\n \"permissions\": self.path_translator.file_perm},\n ftp_file.getAttrs())\n self.assertTrue(hasattr(ftp_file, \"close\"))\n ftp_file.close()",
"def test_identify_contents_2(self):\n Path(self.base_dir, \"new_dir\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n ignore_set = set([\".DS_Store\"])\n list_of_items = basic.identify_contents(self.base_dir, kind=\"file\",\n ignore_set=ignore_set)\n exp_num_items = 1\n self.assertEqual(len(list_of_items), exp_num_items)",
"def Validate(self, relative_file, contents):\n pass",
"def test_identify_contents_1(self):\n Path(self.base_dir, \"new_dir\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=\"file\")\n exp_num_items = 2\n self.assertEqual(len(list_of_items), exp_num_items)"
] | [
"0.74482167",
"0.7001798",
"0.66213626",
"0.6138996",
"0.61203164",
"0.60954994",
"0.59349686",
"0.5887044",
"0.58578086",
"0.5821733",
"0.57995737",
"0.57658714",
"0.57202387",
"0.5709132",
"0.57070374",
"0.56630605",
"0.56137776",
"0.5599135",
"0.55946815",
"0.55778825",
"0.5564798",
"0.55265224",
"0.55143064",
"0.5495484",
"0.5479307",
"0.5476094",
"0.54683906",
"0.54389656",
"0.5436477",
"0.54185736"
] | 0.8399223 | 0 |
Test file.managed passing a basic check_cmd kwarg. See Issue 38111. | def test_managed_check_cmd(file, tmp_path):
name = tmp_path / "sudoers"
ret = file.managed(name=str(name), mode="0440", check_cmd="test -f")
assert ret.result is True
assert "Empty file" in ret.comment
assert ret.changes == {
"new": "file {} created".format(name),
"mode": "0440",
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check(self):\n\n self.assertTrue(Naive().check(self.file_gitignore))\n self.assertTrue(Naive().check(self.file_tests))\n self.assertTrue(Naive().check(self.file_bin))\n self.assertTrue(Naive().check(self.file_py))\n self.assertTrue(Naive().check(self.file_authors))",
"def test_managed_contents(file, tmp_path, name, contents):\n name = tmp_path / \"managed-{}\".format(name)\n ret = file.managed(name=str(name), contents=contents)\n assert ret.result is True\n assert \"diff\" in ret.changes\n assert name.exists()",
"def test_managed_file_issue_51208(file, tmp_path, state_tree):\n vimrc_contents = \"\"\"\n set number\n syntax on\n set paste\n set ruler\n if has(\"autocmd\")\n au BufReadPost * if line(\"'\\\"\") > 1 && line(\"'\\\"\") <= line(\"$\") | exe \"normal! g'\\\"\" | endif\n endif\n\n \"\"\"\n with pytest.helpers.temp_file(\n \"vimrc.stub\", directory=state_tree / \"issue-51208\", contents=vimrc_contents\n ) as vimrc_file:\n name = tmp_path / \"issue_51208.txt\"\n ret = file.managed(name=str(name), source=\"salt://issue-51208/vimrc.stub\")\n assert ret.result is True\n assert name.read_text() == vimrc_file.read_text()",
"def check(*cmd):\n print >>sys.stderr, 'Run:', cmd\n subprocess.check_call(cmd)",
"def cvv_test_check(argv):\n p = optparse.OptionParser()\n p.add_option('-d', '--debug',\n action='store_true', default=False, dest='debug',\n help='run the debugger')\n p.add_option('-i', '--id',\n action='store', default='', dest='id',\n help='id of entry to be checked')\n p.add_option('-p', '--path',\n action='store', default='', dest='path',\n help='name of path to be checked')\n p.add_option('-v', '--verbose',\n action='store_true', default=False, dest='verbose',\n help='more information')\n try:\n (o, a) = p.parse_args(argv)\n except SystemExit:\n return\n\n if o.debug:\n pdb.set_trace()\n\n if o.path != '' and o.id != '':\n print(\"Only --path or --id is allowed, not both.\")\n return\n elif o.path != '':\n c = Checkable.Checkable(path=o.path)\n elif o.id != '':\n c = Checkable.Checkable(rowid=int(o.id))\n else:\n print(\"One of --path or --id is required.\")\n return\n\n c.load()\n c.check()",
"def test_provider_system_hook_file(change_dir, clean_files):\n tackle(no_input=True)\n assert 'thing.yaml' in os.listdir()\n assert 'stuff' in os.listdir()\n # If the file has been moved properly there should be only one file\n assert len(os.listdir('stuff')) == 3",
"def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))",
"def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)",
"def check_file_flag(file):\n return process_file_flag(file, None)",
"def run_command_check(self):\n pass",
"def test_check(self):\n\n self.assertTrue(DirExclude().check(self.file_gitignore))\n self.assertTrue(DirExclude().check(self.file_perceval))\n self.assertTrue(DirExclude().check(self.file_authors))\n\n self.assertFalse(DirExclude().check(self.file_tests))\n self.assertFalse(DirExclude().check(self.file_bin))",
"def test_check(self):\n\n self.assertTrue(PostfixExclude().check(self.file_gitignore))\n self.assertTrue(PostfixExclude().check(self.file_py))\n self.assertTrue(PostfixExclude().check(self.file_authors))\n self.assertTrue(PostfixExclude().check(self.file__init__))\n self.assertTrue(PostfixExclude().check(self.file_bin))",
"def test_is_check_filename_False(self):\n self.assertFalse(check_filename('sample.txt'))",
"def svn_fs_check_related(*args):\r\n return _fs.svn_fs_check_related(*args)",
"def test_supply_file(self):\n f = open(self.junk_file, 'w')\n f.close()\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, self.junk_file)",
"def test_defaultFile(self):\n found = cesmEnvLib.checkFile(\"./test_checkXMLvar.py\", \"read\")\n self.assertTrue(found)",
"def test_files(host, f):\n assert host.file(f).exists",
"def _check_cmd(hook_name, project, commit, cmd, fixup_func=None, **kwargs):\n return [rh.results.HookCommandResult(hook_name, project, commit,\n _run(cmd, **kwargs),\n fixup_func=fixup_func)]",
"def test_provider_system_hook_file_shred(change_dir, clean_files):\n files = ['stuff', 'thing', 'foo']\n for f in files:\n file = open(f, \"w\")\n file.write(f)\n file.close()\n\n tackle('.', no_input=True, context_file='shred.yaml')\n\n for f in files:\n assert not os.path.isfile(f)",
"def test_managed_source_hash_indifferent_case(file, tmp_path, state_tree, test):\n name = tmp_path / \"source_hash_indifferent_case\"\n hello_world_contents = \"Hello, World!\"\n with pytest.helpers.temp_file(\n \"hello_world.txt\", hello_world_contents, state_tree\n ) as local_path:\n actual_hash = hashlib.sha256(local_path.read_bytes()).hexdigest()\n\n # `name` needs to exist for this test, like a previous file.managed run\n shutil.copyfile(str(local_path), str(name))\n\n # Test uppercase source_hash: should return True with no changes\n ret = file.managed(\n name=str(name),\n source=str(local_path),\n source_hash=actual_hash.upper(),\n test=test,\n )\n assert ret.result is True\n assert ret.changes == {}",
"def test_args_valid_file(fake_file):\n args = cli.parse_args(['-f', str(fake_file.path)])\n assert args.file == fake_file.path",
"def test_is_check_filename(self):\n self.assertTrue(check_filename('sample.csv'))",
"def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)\n assert ret.result is True",
"def check(c, path=None, lines=110):\n default_env = {\n \"SECRET_KEY\": os.environ.get(\"SECRET_KEY\", \"fab check key\"),\n \"PATH\": os.environ[\"PATH\"],\n \"LANG\": \"en_US.UTF-8\",\n }\n\n env = os.environ\n env.update(default_env)\n\n commands = [\n 'echo \"=> Checking in \\\\\"%s\\\\\":\"' % (path or \"t2f\"),\n # black\n 'echo \"\\n\\n=> running `black --version` ...\\n\"',\n f\"black --line-length={lines} %s\" % (path or \".\"),\n 'echo \"\\n=> done!\"',\n # mypy\n 'echo \"\\n\\n=> running `mypy --version` ...\\n\"',\n \"mypy --config-file ./mypy.ini %s\" % (path or \"t2f\"),\n 'echo \"\\n=> done!\"',\n # flake8\n 'echo \"\\n\\n=> running flake8 `flake8 --version` ...\\n\"',\n \"flake8 %s\" % (path or \"t2f\"),\n 'echo \"\\n=> done!\"',\n # pylint\n 'echo \"\\n\\n=>running `pylint --version` ...\\n\"',\n \"python pylint-checker.py %s --fail-under=9.8\" % (path or \"t2f\"),\n 'echo \"\\n=> done!\"',\n ]\n\n [c.run(cmd, env=env, warn=True) for cmd in commands]",
"def check_call(cmd):\r\n return subprocess.check_call(cmd)",
"def fileCheckDecorator(func):\n @wraps(func)\n def wrapper(self, src, dst, description):\n self.assertTrue(os.path.isfile(src),\n \"Couldn't find original %s file: %r\"\n % (str(description), src))\n func(self, src, dst, description)\n self.assertTrue(os.path.isfile(dst),\n \"Couldn't find new %s file: %r. Original: %r\"\n % (str(description), dst, src))\n return wrapper",
"def test_command_verify():\n wozardry.parse_args([\"verify\", kValid1])\n wozardry.parse_args([\"verify\", kValid2])",
"def test_file_managed_http_source_no_hash(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=False)\n # This should fail because no hash was provided\n assert ret.result is False",
"def do_check(name, tmpdir, sha):\n path = write_tmp_blob(tmpdir, name, sha)\n puppethooks.checkers.check(path)\n os.unlink(path)",
"def testIgnoredError(self):\n cmds = \"\"\"-chown 0 missingFile\npwd\nexit\n\"\"\"\n def _cbCheckResult(res):\n self.assertIn(self.testDir.asBytesMode().path, res)\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d"
] | [
"0.70263505",
"0.64792174",
"0.63982093",
"0.6309626",
"0.6207292",
"0.6155844",
"0.6115211",
"0.5992373",
"0.5972797",
"0.58782244",
"0.5864632",
"0.58565617",
"0.58358836",
"0.58291245",
"0.5792029",
"0.5778755",
"0.57755667",
"0.57724106",
"0.5764978",
"0.57585824",
"0.57400703",
"0.5739647",
"0.5720151",
"0.5718843",
"0.5707535",
"0.56803864",
"0.5655996",
"0.5644456",
"0.5636961",
"0.5627296"
] | 0.7401335 | 0 |
Make sure that we enforce the source_hash even with local files | def test_managed_local_source_with_source_hash(
file, tmp_path, grail_scene33_file, grail_scene33_file_hash, proto, dest_file_exists
):
name = tmp_path / "local_source_with_source_hash"
if dest_file_exists:
name.touch()
# Test with wrong hash
bad_hash = grail_scene33_file_hash[::-1]
ret = file.managed(
name=str(name),
source=proto + str(grail_scene33_file),
source_hash="sha256={}".format(bad_hash),
)
assert ret.result is False
assert not ret.changes
assert "does not match actual checksum" in ret.comment
# Now with the right hash
ret = file.managed(
name=str(name),
source=proto + str(grail_scene33_file),
source_hash="sha256={}".format(grail_scene33_file_hash),
)
assert ret.result is True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_managed_source_hash_indifferent_case(file, tmp_path, state_tree, test):\n name = tmp_path / \"source_hash_indifferent_case\"\n hello_world_contents = \"Hello, World!\"\n with pytest.helpers.temp_file(\n \"hello_world.txt\", hello_world_contents, state_tree\n ) as local_path:\n actual_hash = hashlib.sha256(local_path.read_bytes()).hexdigest()\n\n # `name` needs to exist for this test, like a previous file.managed run\n shutil.copyfile(str(local_path), str(name))\n\n # Test uppercase source_hash: should return True with no changes\n ret = file.managed(\n name=str(name),\n source=str(local_path),\n source_hash=actual_hash.upper(),\n test=test,\n )\n assert ret.result is True\n assert ret.changes == {}",
"def checkfile(filename, source=None):\n if source:\n # Let's check some sums\n if os.path.exists(filename) and os.path.exists(source):\n src_sha = calchash(source)\n dest_sha = calchash(filename)\n if DRYRUN:\n print(\"{src} hash {src_sha}. {dest} hash {dest_sha}\".format(src=source, dest=filename, src_sha=src_sha.hexdigest(), dest_sha=dest_sha.hexdigest()))\n return src_sha.digest() == dest_sha.digest()\n else:\n return os.path.exists(filename)",
"def test_file_managed_http_source_no_hash(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=False)\n # This should fail because no hash was provided\n assert ret.result is False",
"def put_source(file_path: str, source: str, sha256sum: str) -> str:\n return g.ledger.file.set_source(file_path, source, sha256sum)",
"def generate_content_hash(source_path):\n\n sha256 = hashlib.sha256()\n\n if os.path.isdir(source_path):\n source_dir = source_path\n for source_file in list_files(source_dir):\n update_hash(sha256, source_dir, source_file)\n else:\n source_dir = os.path.dirname(source_path)\n source_file = source_path\n update_hash(sha256, source_dir, source_file)\n\n return sha256",
"def generate_hash(self):\r\n\r\n hash_list = []\r\n for root, dirs, files in os.walk(self.options['source']):\r\n for f in sorted([f for f in files if not f.startswith('.')]):\r\n hash_list.append(os.path.join(root, f))\r\n hash_list.append(str(os.path.getmtime(os.path.join(root, f))))\r\n hash_list = ''.join(hash_list)\r\n\r\n if sys.version < '3':\r\n return hashlib.sha1(hash_list).hexdigest()\r\n return hashlib.sha1(hash_list.encode('utf-8')).hexdigest()",
"def verifyFile(source, destination):\n\tsourceHash = hashlib.sha256(open(source, 'rb').read()).digest()\n\tdestinationHash = hashlib.sha256(open(destination, 'rb').read()).digest()\n\n\tif sourceHash == destinationHash:\n\t\treturn (True, str(sourceHash))\n\n\treturn False",
"def src_hash(self, src_hash):\n\n self._src_hash = src_hash",
"def hash_check_files(self):\n temp_error = 0\n if not self.hash_log_curr:\n self.hash_log_curr = self.hash_curr_files\n else:\n for key, value in self.hash_curr_files.iteritems():\n if key in self.hash_log_curr:\n #test for valid hash\n if self.valid is not None:\n #test any valid hahses are given\n if key in self.valid:\n # a hash code that is ok to duplicate\n self.print_to_log('Valid Duplicate HashCode, skipping: ' + value[5])\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n continue\n # not valid duplicate hash\n # a dupulicate hash found which is a failure and should abort import\n self.hash_log_curr[key][0] = 'Fail'\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n self.hash_log_curr[key][5] += ', ' + value[5]\n self.print_to_log('Duplicate hash found for file: ' + value[5])\n temp_error = 1\n else:\n #a new hash, no issues\n self.hash_log_curr[key] = value\n self.print_to_log('New Hash for file: ' + value[5])\n self.error = temp_error",
"def _source_filename_field_was_properly_initialized(self):\n if not Rule.sources_list_is_initialized:\n Rule.sources_list.append(self.source)\n Rule.sources_list_is_initialized = True\n # print(f\"if {self.source} not in {Rule.sources_list}\")\n if self.source not in Rule.sources_list:\n # print(f\"In rule: {self}\")\n # print(f\"Rule.sources_list = {Rule.sources_list}\")\n raise UninitializedSourceError(f\"{repr(self.source)} not initialized.\")\n if self.target not in Rule.sources_list:\n Rule.sources_list.append(self.target)\n return True",
"def _hash_source(func, name=None):\n source = inspect.getsource(func)\n if source.startswith(\"@task\"):\n source = source.split(\"\\n\", maxsplit=1)[1]\n assert source.startswith(\n \"def\"\n ), f\"Source for task {name} does not start with def, using decorator?\"\n return _hash_result(result=source, serializer=SourceSerializer())",
"def _actual_hash(self):\n return hash_of_file(join(self._temp_path, self._downloaded_filename()))",
"def test_verify_changed_source_file(self):\n # This test was made to pass in fixing Bug #1354880\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])",
"def _sources_hash(self, sha, paths):\r\n files = []\r\n for relative_filename, filename in self._walk_paths(paths):\r\n with open(filename, \"rb\") as fd:\r\n sha.update(Compatibility.to_bytes(relative_filename))\r\n sha.update(fd.read())\r\n files.append(filename)\r\n return files",
"def test_source_package_checksum_is_stable(self):\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n first_checksum = response.headers.get('ETag')\n self.assertIsNotNone(first_checksum)\n self.assertEqual(first_checksum, self.original_checksum)\n\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n second_checksum = response.headers.get('ETag')\n self.assertEqual(first_checksum, second_checksum)\n\n response = self.client.get(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n third_checksum = response.headers.get('ETag')\n self.assertEqual(first_checksum, third_checksum)",
"def test_file_managed_keep_source_false_http(\n file, tmp_path, remote_grail_scene33, modules\n):\n name = str(tmp_path / \"testfile\")\n # Run the state\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n keep_source=False,\n )\n assert ret.result is True\n\n # Now make sure that the file is not cached\n ret = modules.cp.is_cached(remote_grail_scene33.url)\n assert not ret, \"File is still cached at {}\".format(ret)",
"def test_cloud_file_hash(self, test_cloud_file_hash):\n\n self._test_cloud_file_hash = test_cloud_file_hash",
"def _check_hash(self, text):\n old = self.header.get(\"sha1sum\", None)\n if old is None:\n raise crexc.ChecksumError(\"sha1sum is missing in \" + repr(self.basename))\n if self._get_checksum(text) != self.header[\"sha1sum\"]:\n raise crexc.ChecksumError(\"sha1sum mismatch in \" + repr(self.basename))",
"def verify(self, src, extra_files_ok=False):\n for lk, e in self.walk():\n _check_hash_type_support(e.hash[\"type\"])\n\n src = PhysicalKey.from_url(fix_url(src))\n src_dict = dict(list_url(src))\n url_list = []\n size_list = []\n for logical_key, entry in self.walk():\n src_size = src_dict.pop(logical_key, None)\n if src_size is None:\n return False\n if entry.size != src_size:\n return False\n entry_url = src.join(logical_key)\n url_list.append(entry_url)\n size_list.append(src_size)\n\n if src_dict and not extra_files_ok:\n return False\n\n hash_list = calculate_sha256(url_list, size_list)\n for (logical_key, entry), url_hash in zip(self.walk(), hash_list):\n if isinstance(url_hash, Exception):\n raise url_hash\n if entry.hash['value'] != url_hash:\n return False\n\n return True",
"def __hash__(self):\n return hash(self._full_path)",
"def copy_file_check(self):\n pass",
"def fetch_local_hashcode(self, path):\n\t\treturn hashlib.sha256(open(self.config[\"daemon\"][\"rootdir\"] + path, \"rb\").read()).hexdigest()",
"def check_file(self, path, approve_if_no_dbhash=False):\r\n if self.mod.filehash:\r\n h = create_filehash(path)\r\n return h == self.mod.filehash\r\n return approve_if_no_dbhash",
"def test_create_SHA_256_hash_of_file_matches_cosmic_build_tool(\n file_name, expected_hash\n):\n file_path = str(Path(__file__).parent.parent / \"steps/component1\" / file_name)\n hash = utils.create_SHA_256_hash_of_file(file_path)\n\n assert hash == expected_hash",
"def _git_intern_file(self, file_contents, cwd, commit_hash):\n cmd = 'hash-object -t blob -w --stdin'.split(' ')\n stdin = self.api.m.raw_io.input(file_contents)\n stdout = self.api.m.raw_io.output()\n step_name = 'Hashing modified DEPS file with revision ' + commit_hash\n step_result = self.api.m.git(*cmd, cwd=cwd, stdin=stdin, stdout=stdout,\n name=step_name)\n hash_string = step_result.stdout.splitlines()[0]\n try:\n if hash_string:\n int(hash_string, 16)\n return hash_string\n except ValueError: # pragma: no cover\n reason = 'Git did not output a valid hash for the interned file.'\n self.api.m.halt(reason)\n raise self.api.m.step.StepFailure(reason)",
"def get_source(filename: str) -> dict[str, str]:\n file_path = (\n filename\n or g.ledger.fava_options.default_file\n or g.ledger.beancount_file_path\n )\n source, sha256sum = g.ledger.file.get_source(file_path)\n return {\"source\": source, \"sha256sum\": sha256sum, \"file_path\": file_path}",
"def hash_control(self):\n import hashlib\n from datetime import datetime\n import shutil\n\n # Generate hash with file content\n h = hashlib.md5()\n f = open(self.response, 'r')\n h.update(f.read())\n\n # Copy file to repository\n session = model.Session\n #metadata = model.metadata\n\n # Create table if it doesn't exists\n setup_model()\n\n # First check if hash is already in database\n results = session.query(DataRepository.hash).filter_by(hash=h.hexdigest()).all()\n #self.log(results)\n\n if len(results) > 0:\n #log.error('This file %s has the same hash of a file already in\\\n # database. Aborting' % self.response)\n self.log( 'This file %s has the same hash of a file already in\\\n database. Aborting' % self.response)\n os.remove(self.response)\n return True\n\n # Today's date\n file_date = datetime.today()\n\n # Filename hash to store\n filename, extension = os.path.splitext(os.path.basename(self.response))\n h2 = hashlib.md5()\n h2.update(file_date.__str__() + filename)\n filename = h2.hexdigest() + extension\n\n # Now add full repository path to filename\n filename2 = os.path.join(self.repository,filename)\n\n # Now insert data and copy file to repository\n #log.warning('Inserting file %s in repository' % self.response)\n self.log('Inserting file %s in repository' % self.response)\n\n # Copy file to repository\n shutil.copy2(self.response,filename2)\n\n # insert info in database\n repository = DataRepository(hash=h.hexdigest(), creation_date=file_date.today(), original_file = filename2, package_file=self.response)\n session.add(repository)\n session.commit()\n\n #log.warning('File inserted')\n self.log('File inserted')\n\n # Remove other file\n os.remove(self.response)\n\n self.response = filename2\n\n return False",
"def calculate_hash(self, include_md: bool = True) -> str:\n # sourcery skip: reintroduce-else, swap-if-else-branches, use-named-expression\n # BUF_SIZE is totally arbitrary,\n BUF_SIZE = 65536 * 16 # lets read stuff in 16 x 64kb chunks!\n\n file_hash = hashlib.sha1()\n # Stubs Only\n files = list((self.package_path).rglob(\"**/*.pyi\"))\n if include_md:\n files += (\n [self.package_path / \"LICENSE.md\"]\n + [self.package_path / \"README.md\"]\n # do not include [self.toml_file]\n )\n for file in sorted(files):\n # TODO: Extract function to allow for retry on file not found\n try:\n with open(file, \"rb\") as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n file_hash.update(data)\n except FileNotFoundError:\n log.warning(f\"File not found {file}\")\n # ignore file not found errors to allow the hash to be created WHILE GIT / VIRUS SCANNERS HOLD LINGERING FILES\n return file_hash.hexdigest()",
"def _hash_file(self, file_entry):\n if file_entry is None:\n return None\n\n if file_entry.IsDevice() or file_entry.IsPipe() or file_entry.IsSocket():\n # Ignore devices, FIFOs/pipes and sockets.\n return None\n\n hash_context = hashlib.sha256()\n\n try:\n file_object = file_entry.GetFileObject()\n except IOError as exception:\n logging.warning((\n 'Unable to open path specification:\\n{0:s}'\n 'with error: {1!s}').format(file_entry.path_spec.location, exception))\n return None\n\n if not file_object:\n return None\n\n try:\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hash_context.update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n except IOError as exception:\n logging.warning((\n 'Unable to read from path specification:\\n{0:s}'\n 'with error: {1!s}').format(file_entry.path_spec.location, exception))\n return None\n\n return hash_context.hexdigest()",
"def ensure_file(filename, old_contents=None, old_hash=None):\n hash_function = lambda text: hashlib.sha1(text.encode('utf-8')).digest()\n\n if old_hash is None and old_contents is not None:\n old_hash = hash_function(old_contents)\n\n if not os.path.exists(filename):\n # write the file if it doesn't exist\n if old_contents is not None:\n with open(filename, 'w') as f:\n f.write(old_contents)\n else:\n raise RuntimeError(\"No contents to write missing file \" +\n str(filename))\n\n with open(filename, mode='r') as f:\n contents = f.read()\n\n hashed = hash_function(contents)\n\n if old_hash and hashed != old_hash:\n raise RuntimeError(\"Existing file \" + str(filename) + \" does not\"\n + \" match stored file.\")\n\n return contents, hashed"
] | [
"0.674267",
"0.6463627",
"0.6416464",
"0.6111717",
"0.6101362",
"0.6064013",
"0.6063095",
"0.6057598",
"0.604401",
"0.5989828",
"0.5935335",
"0.5933105",
"0.59134775",
"0.5907258",
"0.5866222",
"0.5835483",
"0.5811649",
"0.5794201",
"0.57568145",
"0.57567805",
"0.5734459",
"0.5674582",
"0.566905",
"0.56468767",
"0.5635266",
"0.56224626",
"0.5610352",
"0.55928177",
"0.5586658",
"0.5584164"
] | 0.728525 | 0 |
Make sure that we exit gracefully when a local source doesn't exist | def test_managed_local_source_does_not_exist(file, tmp_path, grail_scene33_file, proto):
name = tmp_path / "local_source_does_not_exist"
ret = file.managed(
name=str(name),
source=proto + str(grail_scene33_file.with_name("scene99")),
)
assert ret.result is False
assert not ret.changes
assert "does not exist" in ret.comment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_source (fileurl, path_unzip, outfile) :\n if outfile is not None and os.path.splitext (outfile)[1].lower () == os.path.splitext (fileurl)[1].lower () :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = outfile)\n return file\n else :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = None)\n txt = _check_zip_file (file, path_unzip = path_unzip, outfile = outfile)\n if not os.path.exists (txt):\n message = \"hal_core._check_source: unable to find file \" + txt + \" source (\" + fileurl + \")\"\n raise PQHException (message)\n return txt",
"def test_quickstart_fails_with_source_folder_removed(self):\n\n run_nbgrader([\"quickstart\", \"example_source_folder_fail\"])\n\n # it should fail if it already exists\n run_nbgrader([\"quickstart\", \"example_source_folder_fail\"], retcode=1)\n\n # it should succeed if source folder not present and create it\n shutil.rmtree(os.path.join(\"example_source_folder_fail\", \"source\"))\n\n # it should fail if it already source folder or config file exists\n run_nbgrader([\"quickstart\", \"example_source_folder_fail\"], retcode=1)",
"def local_assert_empty(path):\n try:\n local = get_local(path)\n except ValueError:\n return\n raise ValueError(\"Something exists at %s\" % local.path)",
"def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)",
"def checkPath(filename, projectSource):\n filePath = os.path.join(projectSource, filename)\n if os.path.exists(filePath):\n pass\n else:\n sys.stderr.write(\"Error: \" + filePath + \" not found\")\n sys.exit(1)\n return filePath",
"def test_source_path_does_not_exist(self, fake_repo_path, fake_package_name):\n source_dir = \"non_existent\"\n payload = {\n \"context_path\": f\"{fake_package_name}.run.ProjectContext\",\n \"source_dir\": source_dir,\n \"project_version\": kedro_version,\n \"project_name\": \"Test Project\",\n }\n _create_kedro_config(fake_repo_path, payload)\n non_existent_path = (fake_repo_path / source_dir).expanduser().resolve()\n\n pattern = f\"Source path '{non_existent_path}' cannot be found\"\n with pytest.raises(KedroContextError, match=re.escape(pattern)):\n load_context(str(fake_repo_path))",
"def do_source(self, line):\n\n if self.root_directory:\n self.source_file = self.root_directory + \"/\" + line\n self.do_check_file(self.source_file)\n else:\n self.source_file = line\n self.do_check_file(self.source_file)",
"def test_exit_on_missing_file(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=MISSING_FILE)",
"def test_the_main_non_existent_file(self):\r\n with self.assertRaises(SystemExit):\r\n the_main_function(\"non existent file\")",
"def test_idea_missing_sources(self):\n self._idea_test(['testprojects/src/java/org/pantsbuild/testproject/missing_sources'])",
"def source(something):\n # How to source? It's impossible. It's like: we fork, run bash, run\n # ourselves in the environment set up by Bash.\n raise NotImplementedError('Impossible.')",
"def test_watch_no_source():\n with pytest.raises(ValueError):\n uflash.watch_file(None, lambda: \"should never be called!\")",
"def prepareLocal(self, client):\n if not os.path.exists( client.location ) or not os.path.isdir( client.location ):\n raise Exception( \"The sources of client {0} should be found in local directory '{1}', but that either doesn't exist or is not a directory.\".format( client.name, client.location ) )\n if not source.prepareLocal(self, client):\n return False\n try:\n subprocess.check_output( 'cp -r \"{0}/\"* \"{1}/\"'.format( escapeFileName( client.location ), escapeFileName( self.localLocation( client ) ) ), shell=True, stderr=STDOUT )\n except subprocess.CalledProcessError as cpe:\n Campaign.logger.log( \"Could not locally prepare source for client {0}: {1}\".format( client.name, cpe.output ) )\n raise cpe\n return True",
"def provoke_and_handle_FileNotFoundError():\n try:\n with open(\"NEIN.mp3\") as f:\n print(\"well\")\n except FileNotFoundError as fnfe:\n print(f\"Sorry! {fnfe}\")",
"def test_template_local_file_noclobber(file, tmp_path):\n source = dest = tmp_path / \"source\"\n source.write_text(\"{{ foo }}\\n\")\n\n ret = file.managed(\n name=str(dest),\n source=str(source),\n template=\"jinja\",\n context={\"foo\": \"Hello world!\"},\n )\n assert ret.result is False\n assert \"Source file cannot be the same as destination\" in ret.comment",
"def test_2():\n try:\n import flake8 # noqa: F401\n except ImportError:\n return None\n\n cwd = os.getcwd()\n os.chdir(PACKAGE_DIR)\n try:\n subprocess.check_call(['flake8'])\n os.chdir(cwd)\n except CalledProcessError:\n os.chdir(cwd)\n raise CalledProcessError",
"def check_source(self,source): \n\n kind = None\n if os.path.exists(source):\n if os.path.isfile(source):\n kind = \"file\"\n elif os.path.isdir(source):\n kind = \"dir\"\n else:\n print(\" Source path : \\n{}\\n Does not exist...\\n\".format(source))\n #print(\" Sys.exit() called by : {}\".format())\n sys.exit()\n\n return kind",
"def test_quickstart_fails_with_config_file_removed(self):\n\n run_nbgrader([\"quickstart\", \"example_source_folder_fail\"])\n\n # it should fail if it already exists\n run_nbgrader([\"quickstart\", \"example_source_folder_fail\"], retcode=1)\n\n # it should succeed if source folder not present and create it\n os.remove(os.path.join(\"example_source_folder_fail\", \"nbgrader_config.py\"))\n\n # it should fail if it already source folder or config file exists\n run_nbgrader([\"quickstart\", \"example_source_folder_fail\"], retcode=1)",
"def copy_to_local(src_file):\r\n if not_exists(src_file, \"Source File\"):\r\n return 1, 0\r\n _local_file = os.path.basename(src_file)\r\n if wrap_cp_file(src_file, _local_file):\r\n return 1, 0\r\n return 0, _local_file",
"def copy_to_local(src_file):\r\n if not_exists(src_file, \"Source File\"):\r\n return 1, 0\r\n _local_file = os.path.basename(src_file)\r\n if wrap_cp_file(src_file, _local_file):\r\n return 1, 0\r\n return 0, _local_file",
"def test_simple_source_constructor_exception():\n TESTPATH = \"/usr/local/share/testfile.mp3\"\n with pytest.raises(robox.RDJResourceErr):\n test01 = Source(path=TESTPATH, exist=True)",
"def _test_local_install():\n if os.getcwd() == os.sep.join(\n os.path.abspath(__file__).split(os.sep)[:-2]):\n import warnings\n warnings.warn('Running the tests from the install directory may '\n 'trigger some failures')",
"def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION",
"def main(source):\n pass",
"def has_source_file( self ):\n return self._source_file is not None",
"def setSourcePath(self, offset=0):\n while True:\n tempSourcePath = input(\n offset * \" \" + \"Specify any change to the default source path [%s]: \" % self.sourcePath) or self.sourcePath\n if os.path.exists(tempSourcePath):\n self.sourcePath = tempSourcePath\n break\n else:\n print(\"Path does not exist!\")",
"def _checkSourcesAvailability(sourcesList):\n sources = sourcesList.split(\",\")\n for source in sources:\n try:\n importlib.import_module(\".sources.\"\n + source\n + \".main\",\n __package__).WesenSource\n except ImportError as e:\n print(e)\n print(\"The source code for one of your AIs could not be loaded: \",\n source)\n sys.exit()",
"def _resolveSourcePath(self, sources, source):\n source = copy.deepcopy(source)\n if source['path'] != '__none__':\n sourcePath = Path(source['path'])\n source['path'] = self._basePath / sourcePath\n if not source['path'].is_file():\n altpath = self._basePath.parent / sourcePath / sourcePath.name\n if altpath.is_file():\n source['path'] = altpath\n if not source['path'].is_file():\n raise TileSourceFileNotFoundError(str(source['path']))\n sources.append(source)",
"def check_for_missing_files(self, path):\n return None",
"def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")"
] | [
"0.6148552",
"0.6142079",
"0.6057",
"0.60119665",
"0.6005793",
"0.5938739",
"0.5919889",
"0.5904504",
"0.58742243",
"0.5834198",
"0.5833697",
"0.58021307",
"0.5729534",
"0.57105595",
"0.5685573",
"0.5681292",
"0.5681117",
"0.566238",
"0.56549567",
"0.56549567",
"0.565292",
"0.5622155",
"0.56090796",
"0.55812234",
"0.55552024",
"0.5541739",
"0.5534037",
"0.5508565",
"0.54988927",
"0.5473843"
] | 0.6278259 | 0 |
Using {{ varname }} with a list or dictionary which contains unicode types on Python 2 will result in Jinja rendering the "u" prefix on each string. This tests that using the "tojson" jinja filter will dump them to a format which can be successfully loaded by our YAML loader. The two lines that should end up being rendered are meant to test two | def test_managed_unicode_jinja_with_tojson_filter(file, tmp_path, state_tree, modules):
if salt.utils.platform.is_windows() and os.environ.get("PYTHONUTF8", "0") == "0":
pytest.skip("Test will fail if PYTHONUTF8=1 is not set on windows")
test_file = tmp_path / "test-tojson.txt"
jinja_template_contents = """
{%- for key in ('Die Webseite', 'Der Zucker') -%}
{{ key }} ist {{ data[key] }}.
{% endfor -%}
"""
sls_contents = (
"""
{%- set data = '{"Der Zucker": "süß", "Die Webseite": "https://saltproject.io"}'|load_json -%}
"""
+ str(test_file)
+ """:
file.managed:
- source: salt://template.jinja
- template: jinja
- context:
data: {{ data|tojson }}
"""
)
with pytest.helpers.temp_file(
"template.jinja", jinja_template_contents, state_tree
), pytest.helpers.temp_file("tojson.sls", sls_contents, state_tree):
ret = modules.state.apply("tojson")
for state_run in ret:
assert state_run.result is True
expected = "Die Webseite ist https://saltproject.io.\nDer Zucker ist süß.\n\n"
assert test_file.read_text() == expected | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_jinja2(device_inventory):\n\n test_template = \"\"\"\n interface Ethernet{{ intf_id }}\n ip address {{ intf_ip }}\n \"\"\"\n\n test_result = \"\"\"\n interface Ethernet1\n ip address 1.2.3.4/24\n \"\"\"\n\n template = Template(test_template)\n result = template.render(intf_id=1, intf_ip=\"1.2.3.4/24\")\n\n assert result == test_result",
"def _demunge_jinja2_vars(meta: Union[dict, list], sentinel: str) -> Union[dict, list]:\n if isinstance(meta, collections.abc.MutableMapping):\n for key, val in meta.items():\n meta[key] = _demunge_jinja2_vars(val, sentinel)\n return meta\n elif isinstance(meta, collections.abc.MutableSequence):\n for i in range(len(meta)):\n meta[i] = _demunge_jinja2_vars(meta[i], sentinel)\n return meta\n elif isinstance(meta, str):\n return meta.replace(sentinel + \"{ \", \"{{ \")\n else:\n return meta",
"def jinja():\n template_path = '/tmp/pycheat-jinja-template.html'\n output_path = '/tmp/pycheat-jinja-output.html'\n\n # create the testing template\n with open(template_path, 'w') as f:\n f.write(\"\"\"Testing template with {{athlet_type}}:\n{% for a in athlets %}\n{{a.name}} is from {{a['country']}}\n{% endfor %}\"\"\")\n\n # testing dict with variables\n context = {\n 'athlet_type': 'tennis players',\n 'athlets': [\n {'name': 'Roger Federer', 'country': 'SUI'},\n {'name': 'Rafael Nadal', 'country': 'ESP'},\n {'name': 'Novak Djokovic', 'country': 'SRB'}\n ]\n }\n\n import jinja2\n import os\n # render the template\n template_dir, template_filename = os.path.split(template_path)\n loader = jinja2.FileSystemLoader(template_dir)\n\n # whitespace control:\n # http://jinja.pocoo.org/docs/2.9/templates/#whitespace-control\n jinja_env = jinja2.Environment(loader=loader, trim_blocks=True,\n lstrip_blocks=True)\n template = jinja_env.get_template(template_filename)\n rendered_output = template.render(context)\n # print and write the result to the file\n print rendered_output\n with open(output_path, 'w') as f:\n f.write(rendered_output.encode('utf-8'))",
"def test_template_json():\n for l in list(templates.data):\n t = templates[l]\n assert len(json.dumps(t.json())) > 0",
"def test_json_renderer(dummy_request: DummyRequest) -> None:\n tag = Tag(name=\"foö\")\n\n renderer = json_renderer()\n output = renderer(None)(tag, {})\n\n assert json.loads(output) == \"foö\"",
"def _remunge_jinja2_vars(meta: Union[dict, list], sentinel: str) -> Union[dict, list]:\n if isinstance(meta, collections.abc.MutableMapping):\n for key, val in meta.items():\n meta[key] = _remunge_jinja2_vars(val, sentinel)\n return meta\n elif isinstance(meta, collections.abc.MutableSequence):\n for i in range(len(meta)):\n meta[i] = _remunge_jinja2_vars(meta[i], sentinel)\n return meta\n elif isinstance(meta, str):\n return meta.replace(\"{{ \", sentinel + \"{ \")\n else:\n return meta",
"def render_json(template, data):\n result = render(template, data)\n result = _remove_ctl_chars(result)\n return _convert_to_json(result)",
"def test_jinja2(self):\n res = self.app.get('/bundle/DEFAULT/main?renderer=paths.jinja2')\n expected = self.stats1['chunks']['main'][0]\n self.assertEqual(res.body.decode('utf-8'), expected['path'] + '\\n')",
"def _render_str(self, template, ** params):\n\n for key in params:\n if(isinstance(params[key], str)):\n params[key] = params[key].decode('utf-8')\n if(isinstance(params[key], dict)):\n for sub_key in params[key]:\n if(isinstance(params[key][sub_key], str)):\n params[key][sub_key] = params[key][sub_key].decode('utf-8')\n t = constants.JINJA_ENV.get_template(template)\n return t.render(params)",
"def testPrettyPrintJSON(self):\n test_dict = {'test': [{'dict1': {'key1': 'val1'}, 'dict2': None}]}\n expected_string = ('{\\n \"test\": [\\n {\\n \"dict1\": {\\n'\n ' \"key1\": \"val1\"\\n }, \\n'\n ' \"dict2\": null\\n }\\n ]\\n}\\n')\n self.assertEqual(expected_string, utils.PrettyPrintJSON(test_dict))",
"def render(template, data):\n\n stringtemplate = template\n if isinstance(template, dict):\n stringtemplate = json.dumps(template, sort_keys=True)\n\n try:\n jtemplate = environment().from_string(stringtemplate)\n except TemplateSyntaxError as err:\n LOG.error(\"Render failed: %s, with template: %s\", str(err), stringtemplate)\n raise\n\n try:\n stringvalue = jtemplate.render(data)\n except TemplateError:\n LOG.error(\"Render failed, with data: %s\", data)\n raise\n return stringvalue",
"def _replace_jinja2_vars(lines: List[str], jinja2_vars: dict) -> List[str]:\n # these regex find jinja2 set statements without and with selectors\n jinja2_re = re.compile(r\"^(\\s*){%\\s*set\\s*(.*)=\\s*(.*)%}(.*)\")\n jinja2_re_selector = re.compile(r\"^(\\s*){%\\s*set\\s*(.*)=\\s*(.*)%}\\s*#\\s*\\[(.*)\\]\")\n\n all_jinja2_keys = set(list(jinja2_vars.keys()))\n used_jinja2_keys = set()\n\n # first replace everything we can\n # we track which kets have been used so we can add any unused keys ar the\n # end\n new_lines = []\n for line in lines:\n _re_sel = jinja2_re_selector.match(line)\n _re = jinja2_re.match(line)\n\n # we only replace simple constant set statements\n if (_re_sel or _re) and not _is_simple_jinja2_set(line):\n new_lines.append(line)\n continue\n\n if _re_sel:\n # if the line has a selector in it, then we need to pull\n # out the right key with the selector from jinja2_vars\n spc, var, val, sel = _re_sel.group(1, 2, 3, 4)\n key = var.strip() + CONDA_SELECTOR + sel\n if key in jinja2_vars:\n _new_line = (\n spc\n + \"{% set \"\n + var.strip()\n + \" = \"\n + json.dumps(jinja2_vars[key])\n + \" %} # [\"\n + sel\n + \"]\\n\"\n )\n used_jinja2_keys.add(key)\n else:\n _new_line = line\n elif _re:\n # no selector\n spc, var, val, end = _re.group(1, 2, 3, 4)\n if var.strip() in jinja2_vars:\n _new_line = (\n spc\n + \"{% set \"\n + var.strip()\n + \" = \"\n + json.dumps(jinja2_vars[var.strip()])\n + \" %}\"\n + end\n )\n used_jinja2_keys.add(var.strip())\n else:\n _new_line = line\n else:\n _new_line = line\n\n if _new_line is not None:\n if _new_line[-1] != \"\\n\":\n _new_line = _new_line + \"\\n\"\n\n new_lines.append(_new_line)\n\n # any unused keys, possibly with selectors, get added here\n if all_jinja2_keys != used_jinja2_keys:\n extra_lines = []\n extra_jinja2_keys = all_jinja2_keys - used_jinja2_keys\n for key in sorted(list(extra_jinja2_keys)):\n if CONDA_SELECTOR in key:\n _key, selector = key.split(CONDA_SELECTOR)\n extra_lines.append(\n \"{% set \"\n + _key\n + \" = \"\n + json.dumps(jinja2_vars[key])\n + \" %}\"\n + \" # [\"\n + selector\n + \"]\\n\",\n )\n else:\n extra_lines.append(\n \"{% set \"\n + key\n + \" = \"\n + json.dumps(jinja2_vars[key])\n + \" %}\"\n + \"\\n\",\n )\n\n new_lines = extra_lines + new_lines\n\n return new_lines",
"def tojson_filter(obj, **kwargs):\n # https://github.com/mitsuhiko/flask/blob/master/flask/json.py\n return Markup(dumps(obj, **kwargs))",
"def test_json_like_flat_yaml(\n before_yaml_flat: str,\n after_yaml_flat: str,\n json_like_flat_res: str,\n):\n assert gendiff(\n before_yaml_flat,\n after_yaml_flat,\n form=JSON_LIKE_FORMAT,\n ) == json_like_flat_res",
"def safe_json(value):\n return json.dumps(value).replace('</', '<\\\\/') # Fix injection of closing markup in strings",
"def test_json_flat_yaml(\n before_yaml_flat: str,\n after_yaml_flat: str,\n json_flat_res: str,\n):\n assert json.loads(\n gendiff(before_yaml_flat, after_yaml_flat),\n ) == json_flat_res",
"def process_vars(vars, dt_name):\n if vars is None:\n # not required\n return None\n\n if not isinstance(vars, dict):\n raise DeployableTypeValidationError(dt_name, 'vars must be a dict')\n\n for key, value in vars.iteritems():\n\n # special handling of list and dict types: push these through JSON\n # encoder and make them strings. Allows object trees in variables\n # which can be placed inline with other JSON.\n if isinstance(value, (dict, list)):\n vars[key] = json.dumps(value)\n\n return vars",
"def render(name, var_dict):\n tmpl_path = path.join(path.dirname(__file__), 'templates')\n tmpl_loader = FileSystemLoader(tmpl_path)\n env = Environment(loader=tmpl_loader)\n template = env.get_template('{}.j2'.format(name))\n result = template.render(var_dict)\n return result",
"def set_jinja_before_request():\n resource_provider.set_jinja_globals()",
"def DumpJson(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import json\n \n text = yaml.dumps(data)\n \n return text",
"def test_basic_usage(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" %}\\n'\n 'test{{num}}\\n'\n '{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '\\ntest123\\n')",
"def render_str(template, **params):\n t = env.jinja_env.get_template(template)\n return t.render(params)",
"def jinja2(self):\n return jinja2.get_jinja2(app=self.app)",
"def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')",
"def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '<hello>')",
"def render_dict(dict):\n\t\treturn str.encode(str(dict))",
"def format_json(value: dict[str, Any], context: dict[str, Any] = {}) -> str:\n output = json.dumps(value, indent=2)\n output = pygments_highlight(output, \"json\", \"tango\")\n style = pygments_css(\"tango\")\n styled = context.get(\"styled\", True) # Used for testing.\n if styled and style:\n html = (\n f\"<style>{style}</style>\"\n f'<pre class=\"highlight\" style=\"margin: 0; padding: 1em;\">{output}</pre>'\n )\n else:\n html = f'<pre style=\"margin: 0;\">{output}</pre>'\n return format_html(\"<div>{}</div>\", mark_safe(html))",
"def format_yaml(template, config):\n formatted = template\n for k, v in config.items():\n formatted = formatted.replace('${%s}' % k, v)\n return formatted",
"def json(data):\n if isinstance(data, dict):\n data = ujson.encode(data)\n uid = str(uuid.uuid4())\n display(HTML('<div id=\"{0}\" style=\"height: 600px; width:100%;\"></div>'.format(uid)))\n display(Javascript(\"\"\"\n require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {\n document.getElementById('%s').appendChild(renderjson(%s))\n });\n \"\"\" % (uid, data)))",
"def string_factory(list_of_dicts):\n result = []\n for item in range(len(list_of_dicts)):\n result.append(template.format(**list_of_dicts[item]))\n return result"
] | [
"0.6214987",
"0.6041898",
"0.603948",
"0.5966153",
"0.581613",
"0.58153105",
"0.5790855",
"0.54614735",
"0.5438323",
"0.5436494",
"0.5423317",
"0.5416245",
"0.53863925",
"0.53596425",
"0.5354983",
"0.53020763",
"0.525357",
"0.52472496",
"0.52264094",
"0.52087677",
"0.52049106",
"0.52023673",
"0.51895803",
"0.51802653",
"0.5168673",
"0.51156795",
"0.5108896",
"0.5090726",
"0.50859666",
"0.5080316"
] | 0.7061903 | 0 |
Test passing a source_hash as an uppercase hash. This is a regression test for Issue 38914 and Issue 48230 (test=true use). | def test_managed_source_hash_indifferent_case(file, tmp_path, state_tree, test):
name = tmp_path / "source_hash_indifferent_case"
hello_world_contents = "Hello, World!"
with pytest.helpers.temp_file(
"hello_world.txt", hello_world_contents, state_tree
) as local_path:
actual_hash = hashlib.sha256(local_path.read_bytes()).hexdigest()
# `name` needs to exist for this test, like a previous file.managed run
shutil.copyfile(str(local_path), str(name))
# Test uppercase source_hash: should return True with no changes
ret = file.managed(
name=str(name),
source=str(local_path),
source_hash=actual_hash.upper(),
test=test,
)
assert ret.result is True
assert ret.changes == {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_basic(self):\n self.assertEqual(hash_str(\"world!\", salt=\"hello, \").hex()[:6], \"68e656\")",
"def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')",
"def test_hash_string(self):\n self.assertEqual(hexlify(self._hashdigest(pubkey_sha)), sample_ripe)",
"def test_hash_url(self):\r\n url = u'http://google.com'\r\n hashed = generate_hash(url)\r\n self.assertEqual('aa2239c17609b2', hashed)",
"def test_user_hash_without_salt(self):\n salt_bytes = \"my salt\".encode()\n salt_hex = salt_bytes.hex()\n with set_env(CSCI_SALT=salt_hex):\n expected = hash_str(\"johndoe\", salt=salt_bytes).hex()[:6]\n actual = get_user_hash(\"johndoe\").hex()[:6]\n self.assertEqual(expected, actual)",
"def test_get_string(self):\n hash_val = self.reverse_hash.get_hash('gil')\n get_string = self.reverse_hash.get_string(hash_val)\n self.assertEqual(get_string, 'gil')",
"def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))",
"def test_basic_user_id_hash(self):\n salt_bytes = \"my salt\".encode()\n salt_hex = salt_bytes.hex()\n with set_env(CSCI_SALT=salt_hex):\n # ensure we get the correct hash regardless of the letter case used\n self.assertEqual(get_user_id(\"johndoe\"), \"7d324c87\")\n self.assertEqual(get_user_id(\"JohnDoe\"), \"7d324c87\")\n self.assertEqual(get_user_id(\"johndoe\".upper()), \"7d324c87\")",
"def test_user1_method3():\n REGEX_MATCH_BCRYPT_HASH = r\"^\\$2[ayb]\\$.{56}$\"\n hashed_password = u.password.decode()\n assert re.match(REGEX_MATCH_BCRYPT_HASH, hashed_password), \"Password was not hashed correctly\"",
"def test_user_hash_with_salt(self):\n self.assertEqual(get_user_hash(\"johndoe\", salt=\"jane\").hex()[:6], \"fb0bf4\")",
"def test_user_hash_empty_salt(self):\n salt_bytes = \"my salt\".encode()\n salt_hex = salt_bytes.hex()\n with set_env(CSCI_SALT=salt_hex):\n expected = hash_str(\"johndoe\", salt=salt_bytes).hex()[:6]\n actual = get_user_hash(\"johndoe\", salt=\"\").hex()[:6]\n self.assertEqual(expected, actual)",
"def test_empty_string(self):\n self.assertEqual(hash_str(\"\", salt=\"\").hex()[:6], \"e3b0c4\")",
"def test_diff_inputs_diff_hash(self):\n # same strings, different salts\n self.assertNotEqual(\n hash_str(\"mystring\", salt=\"mysalt1\").hex(),\n hash_str(\"mystring\", salt=\"mysalt2\").hex(),\n )\n # different strings, same salts\n self.assertNotEqual(\n hash_str(\"mystring1\", salt=\"mysalt\").hex(),\n hash_str(\"mystring2\", salt=\"mysalt\").hex(),\n )",
"def test_none_hash(self):\n get_string = self.reverse_hash.get_string(None)\n self.assertEqual(get_string['error'], 'hash value passed is None')",
"def test_hash():\n if not cloud_aws.boto_is_current:\n skip(\"boto is not installed or is too old\")\n sky = cloud.Sky()\n east_prop = dict(provider=\"aws-ec2\", region=\"us-east-1\")\n east1 = sky.get_provider(east_prop)\n east2 = sky.get_provider(east_prop)\n assert hash(east1) == hash(east2)\n assert hash(east1) != hash(east1.get_provider_key(east_prop))\n assert east1 == east2\n assert not east1 != east2\n\n west = sky.get_provider(dict(provider=\"aws-ec2\", region=\"us-west-1\"))\n assert hash(east1) != hash(west)\n assert not east1 == west\n assert east1 != west",
"def test_cloud_file_hash(self, test_cloud_file_hash):\n\n self._test_cloud_file_hash = test_cloud_file_hash",
"def test_hash_sha256(self):\n block = self.blockchain.new_block(self.proof, self.previous_hash)\n hash_ = self.blockchain.hash(block)\n\n self.assertIsInstance(hash_, str)\n self.assertEqual(hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest(), hash_)",
"def test_find_hash(twitter, message, expected):\n assert twitter.find_hash(message) == expected",
"def hash(self) -> str:\r\n ...",
"def test_upgrade_password_from_sha_to_ssha(self):\n name = u'/no such user/'\n password = '{SHA}jLIjfQZ5yojbZGTqxg2pY0VROWQ=' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'",
"def test_string_unicode_128(self):\n self.assertEqual(\n CityHash128WithSeed(EMPTY_STRING), CityHash128WithSeed(EMPTY_UNICODE)\n )",
"def test_unicode_2_128(self):\n test_case = u\"\\u2661\" # pylint: disable=redundant-u-string-prefix\n self.assertTrue(isinstance(CityHash128WithSeed(test_case), long))",
"def test_create_SHA_256_hash_of_file_matches_cosmic_build_tool(\n file_name, expected_hash\n):\n file_path = str(Path(__file__).parent.parent / \"steps/component1\" / file_name)\n hash = utils.create_SHA_256_hash_of_file(file_path)\n\n assert hash == expected_hash",
"def test_consistent_encoding_128(self):\n text = u\"abracadabra\" # pylint: disable=redundant-u-string-prefix\n self.assertEqual(\n CityHash128WithSeed(text), CityHash128WithSeed(text.encode(\"utf-8\"))\n )",
"def test_hash(self):\n self.assertEqual(hash(self._version1), hash(self._version1))\n self.assertNotEqual(hash(self._version2), hash(self._version1))\n self.assertEqual(hash(\"0.1\"), hash(self._version1))",
"def _hashsanitize(bytesin):\n # Used for converting raw byte data into a hex string. If the byte isn't a hex digit, use nothing instead.\n return \"\".join([x if x.lower() in 'abcdef0123456789' else '' for x in bytesin])",
"def test_hash_params():\n assert (\n hash_params(\n {\n \"name\": \"my-name\",\n \"labels\": {\n \"label1\": \"label\",\n },\n }\n )\n == \"1c67a2e8dd405725a4cdf7b58fed3e948aed135ac25c494a3b336c83a72ac0c8\"\n )",
"def test_string_unicode_64(self):\n self.assertEqual(\n CityHash64WithSeed(EMPTY_STRING), CityHash64WithSeed(EMPTY_UNICODE)\n )",
"def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"",
"def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')"
] | [
"0.63565356",
"0.62671137",
"0.6251786",
"0.60924065",
"0.6028704",
"0.5976406",
"0.595482",
"0.5913875",
"0.581719",
"0.5811794",
"0.5763702",
"0.5737824",
"0.5670293",
"0.5666691",
"0.56441087",
"0.5638604",
"0.56131727",
"0.5600814",
"0.5588752",
"0.5580525",
"0.555203",
"0.5543693",
"0.55362827",
"0.5519394",
"0.5517906",
"0.5510413",
"0.5486926",
"0.54601187",
"0.5453748",
"0.54473287"
] | 0.6330392 | 1 |
Tests that latin1 file contents are represented properly in the diff | def test_managed_latin1_diff(file, tmp_path, state_tree):
contents = "<html>\n<body>\n{}</body>\n</html>\n"
testfile = tmp_path / "issue-48777.html"
testfile.write_text(contents.format(""))
# Replace it with the new file and check the diff
with pytest.helpers.temp_file("issue-48777.html", "", state_tree) as src:
src.write_bytes(contents.format("räksmörgås").encode("latin1"))
ret = file.managed(name=str(testfile), source="salt://issue-48777.html")
assert ret.result is True
assert "+räksmörgås" in ret.changes["diff"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_diff_with_unicode(self):\n diff = (\n 'diff --git a/cfg/téstcase.ini b/cfg/téstcase.ini\\n'\n 'index cc18ec8..5e70b73 100644\\n'\n '--- a/cfg/téstcase.ini\\n'\n '+++ b/cfg/téstcase.ini\\n'\n '@@ -1,6 +1,7 @@\\n'\n '+blah blah blah\\n'\n ' [mysql]\\n'\n ' hóst = localhost\\n'\n ' pórt = 3306\\n'\n ' user = user\\n'\n ' pass = pass\\n'\n '-db = pyunít\\n'\n '+db = pyunít\\n'\n ).encode('utf-8')\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename='cfg/téstcase.ini'.encode('utf-8'),\n orig_file_details=b'cc18ec8',\n modified_filename='cfg/téstcase.ini'.encode('utf-8'),\n modified_file_details=b'5e70b73',\n old_unix_mode='100644',\n new_unix_mode='100644',\n insert_count=2,\n delete_count=1,\n data=diff)",
"def test_file_bin_read_unicode_as_bin(self):\n FileWriter(self.unicode_path).write_utf8(self.unicode_string)\n bin_data = FileReader(self.unicode_path).read_bin() #read unicode file as binary\n uni_text = bin_data.decode(\"utf-8\") #decode to utf-8\n self.assertEqual(uni_text, self.unicode_string)",
"def test_003(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"foo/bar/home.txt\")\n\n content = \"\"\"Some sample unicode text: フランス Furansu\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result",
"def test_utf8_cp1252_char_file(self):\n\t\tmain.Main(['input/utf8.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/utf8.csv'))",
"def test_windows_1252_1_file(self):\n\t\tmain.Main(['input/windows_1252_1.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/windows_1252_1.csv'))",
"def test_file_utf8_readwrite(self):\n FileWriter(self.unicode_path).write_utf8(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(unicode_text, self.unicode_string)",
"def test_file_utf8_readwrite_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read()\n self.assertEqual(self.unicode_string, unicode_text)",
"def test_common_non_ascii_positive(tmp_path):\n d = tmp_path\n some_file = d / \"test_01.txt\"\n some_file.write_text(\"\\\\u00fc! , : \\\\u00f6 \\\\u00f6 asdf\")\n assert get_most_common_non_ascii_char(str(some_file)) == \"\\u00f6\"",
"def test_file_utf8_write_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(self.unicode_string, unicode_text)",
"def test_load_verify_unicode_cafile(self, tmpfile):\n self._load_verify_cafile(\n tmpfile.decode(getfilesystemencoding()) + NON_ASCII\n )",
"def test_file_utf8_readas_writeas(self):\n FileWriter(self.unicode2_path).write_as(self.unicode_string, \"utf-8\")\n unicode_text = FileReader(self.unicode2_path).read_as(\"utf-8\")\n self.assertEqual(unicode_text, self.unicode_string)",
"def test_windows_1252_file(self):\n\t\tmain.Main(['input/windows_1252.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/windows_1252.csv'))",
"def test_file_gzip_utf8_readwrite_explicit_decode(self):\n if state.py2:\n FileWriter(self.unicode_path).gzip(self.unicode_string)\n gzip_contents = FileReader(self.unicode_path + \".gz\").read_gzip(\"utf-8\") # when read with explicit utf-8 decoding, strings should match\n self.assertEqual(gzip_contents, self.unicode_string)\n elif state.py3:\n FileWriter(self.unicode_path).gzip(bytes(self.unicode_string, 'utf-8'))\n gzip_contents = FileReader(self.unicode_path + \".gz\").read_gzip(\"utf-8\") # when read with explicit utf-8 decoding, strings should match\n self.assertEqual(gzip_contents, self.unicode_string)",
"def test_unicode(self):\n name = unicode(os.path.basename(self.cbf_filename))\n obj = fabio.open(self.cbf_filename)\n obj.write(os.path.join(self.tempdir, name))\n other = fabio.open(os.path.join(self.tempdir, name))\n self.assertEqual(abs(obj.data - other.data).max(), 0, \"data are the same\")\n for key in obj.header:\n if key in[ \"filename\", \"X-Binary-Size-Padding\"]:\n continue\n self.assertTrue(key in other.header, \"Key %s is in header\" % key)\n self.assertEqual(obj.header[key], other.header[key], \"value are the same for key %s [%s|%s]\" % (key, obj.header[key], other.header[key]))",
"def test_filename_transliterate(self):\n string = \"тест.mp3\"\n expected = \"test.mp3\"\n self.assertEqual(transliterate(string), expected)",
"def _test_this_file_encoding(\n fname, test_file,\n unicode_whitelist=unicode_whitelist,\n unicode_strict_whitelist=unicode_strict_whitelist):\n has_unicode = False\n\n is_in_whitelist = False\n is_in_strict_whitelist = False\n for patt in unicode_whitelist:\n if fnmatch.fnmatch(fname, patt):\n is_in_whitelist = True\n break\n for patt in unicode_strict_whitelist:\n if fnmatch.fnmatch(fname, patt):\n is_in_strict_whitelist = True\n is_in_whitelist = True\n break\n\n if is_in_whitelist:\n for idx, line in enumerate(test_file):\n try:\n line.encode(encoding='ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n has_unicode = True\n\n if not has_unicode and not is_in_strict_whitelist:\n assert False, message_unicode_D % fname\n\n else:\n for idx, line in enumerate(test_file):\n try:\n line.encode(encoding='ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n assert False, message_unicode_B % (fname, idx + 1)",
"def test_file_gzip_utf8_readwrite(self):\n if state.py2:\n FileWriter(self.unicode_path).gzip(self.unicode_string)\n gzip_contents = FileReader(self.unicode_path + \".gz\").read_gzip() # when read without explicit utf-8 decoding, the strings will not match\n self.assertNotEqual(gzip_contents, self.unicode_string)\n elif state.py3:\n FileWriter(self.unicode_path).gzip(bytes(self.unicode_string, 'utf-8'))\n gzip_contents = FileReader(self.unicode_path + \".gz\").read_gzip() # when read without explicit utf-8 decoding, the strings will not match\n self.assertNotEqual(gzip_contents, self.unicode_string)",
"def test_response_content_type_encoding():\n headers = {\"Content-Type\": \"text-plain; charset=latin-1\"}\n content = \"Latin 1: ÿ\".encode(\"latin-1\")\n response = httpcore.Response(200, content=content, headers=headers)\n assert response.text == \"Latin 1: ÿ\"\n assert response.encoding == \"latin-1\"",
"def test_utf8_bytes(self):\n # Python3 doesn't support bytestrings, don't run this test\n if str is unicode:\n return\n input = \"A r\\xc3\\xa9sum\\xc3\\xa9, also spelled resum\\xc3\\xa9 or resume\"\n output = input.split(\" \")\n output[1] = output[1][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])\n self.assertTrue(input[itmV[1]:].startswith(itmO))",
"def test_unicodeBasic(self):\n input = raw_unicode(\n r\"Ik ben ge\\u00EFnteresseerd in de co\\u00F6rdinatie van mijn knie\\u00EBn, maar kan niet \\u00E9\\u00E9n \\u00E0 twee enqu\\u00EAtes vinden die recht doet aan mijn carri\\u00E8re op Cura\\u00E7ao\")\n output = input.split(\" \")\n output[8] = output[8][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])\n self.assertTrue(input[itmV[1]:].startswith(itmO))",
"def test_codeanalysis_latin():\n code = io.open(TEST_FILE_LATIN, encoding=\"iso-8859-1\").read()\n check_results = (check_with_pyflakes(code, TEST_FILE_LATIN)\n + check_with_pep8(code, TEST_FILE_LATIN)\n + find_tasks(code))\n if PY2:\n num_results = 1\n else:\n num_results = 2\n assert len(check_results) == num_results",
"def compare_contents(first, second):\n def strip_lines(lines):\n return [line.strip() + '\\n' for line in lines]\n\n try:\n with open(first, mode='r', encoding='utf_8') as file:\n content1 = strip_lines(file.readlines())\n except IOError:\n return 'failed to load first file: ' + first\n\n try:\n with open(second, mode='r', encoding='utf_8') as file:\n content2 = strip_lines(file.readlines())\n except IOError:\n return 'failed to load second file: ' + second\n\n diff = unified_diff(content1, content2,\n fromfile=first, tofile=second, lineterm='\\n')\n diff_content = ''.join(list(diff))\n if diff_content:\n return 'unexpected file differences\\n{}'.format(diff_content)\n\n return None",
"def test_002(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"compiler_write_002\")\n\n content = \"\"\"Some sample unicode text: フランス Furansu\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result",
"def test_bad_ascii_data(self):\n\n # Bad file\n asc_filename = os.path.join(TESTDATA, 'bad_ascii_format.asc')\n try:\n read_layer(asc_filename)\n except ReadLayerError, e:\n # Check that error message is reasonable, e.g.\n # File /home/nielso/sandpit/inasafe_data/test/bad_ascii_format.asc\n # exists, but could not be read. Please check if the file can\n # be opened with e.g. qgis or gdalinfo\n\n msg = 'Unexpected error message for corrupt asc file: %s' % e\n assert 'exists' in str(e), msg\n assert 'gdalinfo' in str(e), msg\n assert 'qgis' in str(e), msg\n assert 'Please' in str(e), msg\n\n # No file\n asc_filename = 'nonexisting_ascii_file_234xxxlcrhgqjk.asc'\n try:\n read_layer(asc_filename)\n except ReadLayerError, e:\n # Check that this error message reflects that file did not exist\n msg = 'Unexpected error message for non existing asc file: %s' % e\n assert 'Could not find file' in str(e), msg",
"def test_decode():\n assert TextCleaner().transform([[\"tést\"]])[\"corpus\"][0] == \"test\"",
"def test_encoding_handling(self):\n non_utf8_file = datapath('poincare_cp852.tsv')\n relations = [relation for relation in PoincareRelations(non_utf8_file, encoding='cp852')]\n self.assertEqual(len(relations), 2)\n self.assertEqual(relations[0], (u'tímto', u'budeš'))\n\n utf8_file = datapath('poincare_utf8.tsv')\n relations = [relation for relation in PoincareRelations(utf8_file)]\n self.assertEqual(len(relations), 2)\n self.assertEqual(relations[0], (u'tímto', u'budeš'))",
"def test_read_file():\n assert read_file('test_read_file.txt') == \"\"\"ABCDEFGHIJKLMNOPQRSTUVWXYZ?\nabcdefghijklmnopqrstuvwxyz.\n\"\"\"",
"def test_response_content_type_encoding():\n headers = {\"Content-Type\": \"text-plain; charset=latin-1\"}\n content = \"Latin 1: ÿ\".encode(\"latin-1\")\n response = httpx.Response(\n 200,\n content=content,\n headers=headers,\n )\n assert response.text == \"Latin 1: ÿ\"\n assert response.encoding == \"latin-1\"",
"def test_import_with_bad_utf8(self):\n r = self.client.post('/import/results', data={\n 'results': (BytesIO(b'foo\\x80'), 'foo.txt'),\n })\n self.assertEqual(r.status_code, 400)",
"def test_write_file_to_disk(self):\r\n file_data = u'ß' * 100\r\n write_file_to_disk(self.test_file2, file_data)\r\n self.file_contents_is_equal(self.test_file2, file_data)"
] | [
"0.74459535",
"0.6566833",
"0.65601534",
"0.65575856",
"0.6530268",
"0.6500336",
"0.646092",
"0.6412934",
"0.63905233",
"0.6347647",
"0.628505",
"0.6277089",
"0.62725306",
"0.6255228",
"0.62516564",
"0.62102413",
"0.62016076",
"0.6180023",
"0.6168055",
"0.61065865",
"0.6096852",
"0.6082314",
"0.60738176",
"0.60424596",
"0.6029533",
"0.6016939",
"0.6004021",
"0.5994356",
"0.59862614",
"0.5972485"
] | 0.7496446 | 0 |
Test file.managed state with onchanges | def test_file_managed_requisites(modules, tmp_path, state_tree, requisite):
    file1 = tmp_path / "file1"
    file2 = tmp_path / "file2"
    sls_contents = """
    one:
      file.managed:
        - name: {file1}
        - source: salt://testfile
    # This should run because there were changes
    two:
      test.succeed_without_changes:
        - {requisite}:
          - file: one
    # Run the same state as "one" again, this should not cause changes
    three:
      file.managed:
        - name: {file2}
        - source: salt://testfile
    # This should not run because there should be no changes
    four:
      test.succeed_without_changes:
        - {requisite}:
          - file: three
    """.format(
        file1=file1, file2=file2, requisite=requisite
    )
    testfile_contents = "The test file contents!\n"
    # Lay down the file used in the below SLS to ensure that when it is
    # run, there are no changes.
    file2.write_text(testfile_contents)
    with pytest.helpers.temp_file(
        "onchanges-prereq.sls", sls_contents, state_tree
    ), pytest.helpers.temp_file("testfile", testfile_contents, state_tree):
        ret = modules.state.apply("onchanges-prereq", test=True)
        # The file states should both exit with None
        assert ret["one"].result is None
        assert ret["three"].result is True
        # The first file state should have changes, since a new file was
        # created. The other one should not, since we already created that file
        # before applying the SLS file.
        assert ret["one"].changes
        assert not ret["three"].changes
        # The state watching 'one' should have been run due to changes
        assert ret["two"].comment == "Success!"
        # The state watching 'three' should not have been run
        if requisite == "onchanges":
            expected_comment = (
                "State was not run because none of the onchanges reqs changed"
            )
        else:
            expected_comment = "No changes detected"
        assert ret["four"].comment == expected_comment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_inotify(self):\n self.fail(\"write a test\")",
"def testDirtyRefresh(self):\n \n pass",
"def tests_ti_file_update(self):\n super().indicator_update()",
"def on_file_changed(self, path):\n\t\tpass",
"def test_managed_contents(file, tmp_path, name, contents):\n name = tmp_path / \"managed-{}\".format(name)\n ret = file.managed(name=str(name), contents=contents)\n assert ret.result is True\n assert \"diff\" in ret.changes\n assert name.exists()",
"def hook_file_opened(self):",
"def before_update(mapper, conn, target):\n\n assert bool(target.ref), \"File.ref can't be null (before_update)\"",
"def test_change_trigger(self):\n self._test_change_trigger(False)",
"def test_update_state(self):\n pass",
"def test_get_open_files():\n\n before = get_open_files()\n # Nothing changes...\n after = get_open_files()\n\n assert before\n assert before == after",
"def test_ensure_state_change_if_needed(self, setState, commit):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('NEW_FILES')\n setState.assert_called()",
"def svn_fs_contents_changed(*args):\r\n return _fs.svn_fs_contents_changed(*args)",
"def check_line_edits_and_refresh_filestate(self):\r\n\t\t# line edit changes (other places where filestate is updated: browse button clicks, ok click)\r\n\t\tif self.source_img_entry.isModified():\r\n\t\t\tself.filestate.set_source_img_filename(self.source_img_entry.text().replace(\"\\\\\", \"/\"))\r\n\t\tif self.existing_case and self.source_db_entry.isModified():\r\n\t\t\tself.filestate.set_source_db_filename(self.source_db_entry.text().replace(\"\\\\\", \"/\"))\r\n\t\tif self.sink_dir_entry.isModified():\r\n\t\t\tself.filestate.set_sink_dir_name(self.sink_dir_entry.text().replace(\"\\\\\", \"/\"))",
"def test_change_trigger_carefully(self):\n self._test_change_trigger(True)",
"def process_IN_MODIFY(self, event):",
"def test_changedFile(self):\n self.write(\"service1.json\", [{\"host\": \"host1\", \"port\": 123},\n {\"host\": \"host2\", \"port\": 124}])\n self.pump()\n self.write(\"service1.json\", [{\"host\": \"host3\", \"port\": 125},\n {\"host\": \"host4\", \"port\": 126}])\n self.pump()\n self.assertNodesEqual(\n knownNodes(self.disco, \"service1\", \"staging\"),\n [self.node(\"service1\", \"host3\", 125),\n self.node(\"service1\", \"host4\", 126)])",
"def change():",
"def test_update_state1(self):\n pass",
"def _open_changed ( self ):\n file_name = open_file( extensions = FileInfo(), id = demo_id )\n if file_name != '':\n self.file_name = file_name",
"def test_update_state2(self):\n pass",
"def test_record_update_file(appctx, db, record_with_file_processed, obj_name, content):\n record = CernSearchRecord.get_record(record_with_file_processed.id)\n initial_file_name = \"hello.txt\"\n initial_file = record.files[initial_file_name].obj # type: ObjectVersion\n initial_file_content = record.files_content[initial_file_name].obj # type: ObjectVersion\n\n assert 1 == len(record.files)\n assert 1 == len(record.files_content)\n assert initial_file.file.readable is False\n assert initial_file.deleted is False\n assert initial_file_content.file.readable is True\n\n record.files[obj_name] = BytesIO(content)\n db.session.commit()\n\n # mimic file uploaded flow\n file_uploaded.send(record.files[obj_name].obj)\n\n record = CernSearchRecord.get_record(record.id)\n\n assert record[\"_bucket\"] == record.bucket_id\n assert record[\"_bucket_content\"] == record.bucket_content_id\n\n assert 1 == len(record.files)\n assert 1 == len(record.files_content)\n assert record.files[obj_name].obj.file.readable is False\n assert initial_file_content.file.readable is False\n\n # different file upload creates a delete marker\n if initial_file_name != obj_name:\n with raises(KeyError):\n record.files[initial_file_name]\n with raises(KeyError):\n record.files_content[initial_file_name]\n\n file_1 = record.files_content[obj_name]\n assert obj_name == file_1[\"key\"]\n\n storage = file_1.obj.file.storage() # type: FileStorage\n fp = storage.open(mode=READ_MODE_BINARY)\n\n try:\n assert content.decode() in json.load(fp)[\"content\"]\n finally:\n fp.close()",
"def run_file_change(op_list_file):\n if os.path.exists(\"flag_change_file.txt\"):\n print(\n \"-----maybe op_file has changed, so don't need to change again------\"\n )\n else:\n run_multi_thread(op_list_file)",
"def touched_files(self, parent):",
"def test_change_data(self):\n with pike.Graph('g') as graph:\n pike.glob('.', '*') | pike.ChangeListenerNode()\n self.make_files(foo='a', bar='b')\n ret = graph.run()\n self.assert_files_equal(ret['default'], ['foo', 'bar'])\n self.make_files(foo='asdf', bar='b')\n ret = graph.run()\n self.assert_files_equal(ret['default'], ['foo'])",
"def test_provider_system_hook_file(change_dir, clean_files):\n tackle(no_input=True)\n assert 'thing.yaml' in os.listdir()\n assert 'stuff' in os.listdir()\n # If the file has been moved properly there should be only one file\n assert len(os.listdir('stuff')) == 3",
"def test_update_sqlite_file(self):\n self.sqlite_file_obj = open(self.sqlite_file, 'r')\n self._create_composite_resource(self.sqlite_file_obj)\n res_file = self.composite_resource.files.first()\n # set the sqlite file to TimeSeries file type\n TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)\n res_file = self.composite_resource.files.first()\n logical_file = res_file.logical_file\n logical_file.metadata.abstract = \"new abstract for time series file type\"\n logical_file.metadata.is_dirty = True\n logical_file.metadata.save()\n\n url_params = {'file_type_id': logical_file.id}\n url = reverse('update_sqlite_file', kwargs=url_params)\n request = self.factory.post(url, data={})\n request.user = self.user\n # this is the view function we are testing\n response = update_sqlite_file(request, file_type_id=logical_file.id)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response_dict = json.loads(response.content)\n self.assertEqual('success', response_dict['status'])\n self.composite_resource.delete()",
"def test_update_state3(self):\n pass",
"def svn_fs_props_changed(*args):\r\n return _fs.svn_fs_props_changed(*args)",
"def assertState(self, files):\n\n for path, expected in files.iteritems():\n fullpath = os.path.join(self._dir, path)\n self.assertFile(fullpath, path)\n with open(fullpath, 'r') as file:\n actual = file.read() \n self.assertEqual(actual, expected)",
"def test_update_state4(self):\n pass"
] | [
"0.69554883",
"0.64876914",
"0.63309187",
"0.6319913",
"0.6254663",
"0.62185377",
"0.61961335",
"0.6126965",
"0.61166674",
"0.60585064",
"0.60108006",
"0.6001106",
"0.59958",
"0.5961527",
"0.59245294",
"0.5922512",
"0.58627933",
"0.5855503",
"0.58362347",
"0.5827941",
"0.5825151",
"0.5813928",
"0.57791156",
"0.5775393",
"0.5756012",
"0.5751265",
"0.573679",
"0.5735875",
"0.57352024",
"0.57087797"
] | 0.67222685 | 1 |
This test ensures that after a binary file is created, salt can confirm that the file is in the correct state. | def test_binary_contents_twice(file, tmp_path):
    name = tmp_path / "1px.gif"
    # First run state ensures file is created
    ret = file.managed(name=str(name), contents=BINARY_FILE)
    assert ret.result is True
    # Second run of state ensures file is in correct state
    ret = file.managed(name=str(name), contents=BINARY_FILE)
    assert ret.result is True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_upload_binary(self):\n uploadFile = os.path.join(testdatadir, \"upload.data.gz\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED",
"def test_binary_contents(file, tmp_path):\n name = tmp_path / \"1px.gif\"\n ret = file.managed(name=str(name), contents=BINARY_FILE)\n assert ret.result is True",
"def testRetrieveStateFile(self):\n # Test call was succesful\n self.assertEqual(resource_manager.RetrieveResourceState(), {})\n\n # Test file was created\n self.assertTrue(os.path.exists(config.RESOURCE_FILE))\n\n # Test bad resource file\n with open(config.RESOURCE_FILE, 'w') as fh:\n fh.write(\"blah\")\n fh.close()\n self.assertRaises(TurbiniaException, resource_manager.RetrieveResourceState)\n os.remove(config.RESOURCE_FILE)",
"def test_file_bin_readwrite(self):\n FileWriter(self.binary_path).write_bin(self.binary_string)\n bin_data = FileReader(self.binary_path).read_bin()\n self.assertEqual(bin_data, self.binary_string)",
"def test_make_file():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.make_job_file(STATUS_DIR, 'generation', 'test1', TEST_1_ATTRS_1)\n status = Status.retrieve_job_status(STATUS_DIR, 'generation', 'test1')\n msg = 'Failed, status is \"{}\"'.format(status)\n assert status == 'R', msg",
"def test_create(self):\n path = self.tmp_py()\n # Creating a file that doesn't exist should succeed\n self.cls.create(path)\n self.assertTrue(os.path.exists(path))\n # Created file should be a valid script (If not, raises an error)\n self.cls.verify(path)\n # Can't create it again: it already exists\n self.assertRaises(exceptions.PathFoundError,self.cls.create,path)",
"def test_success_from_bin():\n createFromBin(\"tests/vbaProject.bin\", \"src/data\", \"success_bin.xlam\")\n # Assert that xlam file is created\n assert exists(\"success_bin.xlam\")\n #assert that bin file within success_bin.xlam matches tests/vbaProject.bin\n extractBinFromZip(\"success_bin.xlam\")\n md5hasher = FileHash('md5')\n assert md5hasher.hash_file(\"tests/vbaProject.bin\") == md5hasher.hash_file(\"xl/vbaProject.bin\")\n\n createFromZip(\"success_bin.xlam\", \"src/data\", \"success_xlam.xlam\")\n assert exists(\"success_xlam.xlam\")\n #assert that bin file within success_xlam.xlam matches bin file within success_bin.xlam\n extractBinFromZip(\"success_xlam.xlam\")\n assert md5hasher.hash_file(\"tests/vbaProject.bin\") == md5hasher.hash_file(\"xl/vbaProject.bin\")",
"def tests_ti_file_create_2(self):\n metadata = {\n 'size': 84504,\n 'sha256': '33af46377c0e52ca19aea233b3afb64505b32fac2231ec7a8a6795812fae0d10',\n 'md5': 'a9ba66af82897dadb82e3e89c70ae7ac',\n 'sha1': '19d08af69fe15af22ba81f045e31230150d4bdad',\n }\n file_indicator = self.ti.file(**metadata)\n file_indicator.delete()\n\n assert file_indicator.data['sha1'] == metadata['sha1']\n response = file_indicator.create()\n assert response.ok\n unique_id = ':'.join([metadata[x] for x in ['sha256', 'sha1', 'md5']])\n file_indicator = self.ti.file(unique_id=unique_id, **metadata)\n\n assert file_indicator.data['sha256'] == metadata['sha256']\n assert file_indicator.data['sha1'] == metadata['sha1']\n\n response = file_indicator.update()\n assert response.ok\n file_indicator.delete()",
"def __create_test_file(self):\n self.test_file = os.path.join(os.path.dirname(self.server_path), \"data\")\n with open(self.test_file, \"ab+\") as f:\n n_blocks = int(self.args.size) // self.max_block_size\n for i in range(n_blocks):\n f.write(bytearray(os.urandom(self.max_block_size)))\n remaining = int(self.args.size) % self.max_block_size\n if remaining > 0:\n f.write(bytearray(os.urandom(remaining)))\n self.assertEqual(int(self.args.size), os.path.getsize(self.test_file))",
"def test_file(tmpdir):\n file_path = tmpdir / 'test.txt'\n file_path = file_path.write_binary(b'This is some test data!')\n return file_path",
"def test_subversion_binary_file(host):\n assert host.file(PACKAGE_BINARY).is_file",
"def test_fs_instance(self):\n b1 = BaseModel()\n models.storage.save()\n self.assertEqual(os.path.exists('file.json'), True)",
"def test_empty_file(self):\n\n temp = tempfile.NamedTemporaryFile()\n temp.flush()\n self.assertRaises(MalformedFileError, NBTFile, temp.name)",
"def test_file_creation(data, logging_file_name):\n create_instance(data, logging_file_name)\n log_file_name = create_file_path(logging_file_name)\n print(log_file_name)\n if data is None or len(data) == 0:\n assert not os.path.exists(log_file_name)\n else:\n assert os.path.exists(log_file_name)",
"def test_hash_data(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n File(self.file).write(\"\\n\".join(self.data_to_write))\n expected = True\n actual = PyFunceble.path.isfile(self.file)\n\n self.assertEqual(expected, actual)\n\n for algo, result in self.expected_hashed.items():\n self.assertEqual(\n result,\n Hash(self.file).hash_data(algo),\n msg=\"%s did not passed the test\" % repr(algo),\n )\n\n File(self.file).delete()\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n\n self.assertEqual(expected, actual)",
"def testWriteBinaryData(self):\n file_writer = writers.FileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteBinaryData(b'Binary data')\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n expected_output_data = b'Binary data'\n self.assertEqual(output_data, expected_output_data)",
"def test_restore_backup():",
"def test_write(self):\n with TemporaryDirectoryChanger():\n nhflux.NhfluxStream.writeBinary(self.nhf, \"NHFLUX2\")\n with open(SIMPLE_HEXZ_NHFLUX, \"rb\") as f1, open(\"NHFLUX2\", \"rb\") as f2:\n expectedData = f1.read()\n actualData = f2.read()\n for expected, actual in zip(expectedData, actualData):\n self.assertEqual(expected, actual)",
"def _test(self, file_name):\n data = bob.io.base.load(file_name)\n assert (_data == data).all()",
"def test_save_to_file(self):\n self.assertFalse(os.path.exists(\"file.json\"))",
"def test_06_verify_tar01(self, mock_isfile, mock_call, mock_msg):\n mock_msg.level = 0\n mock_isfile.return_value = False\n mock_call.return_value = 0\n status = udocker.FileUtil(\"tarball.tar\").verify_tar()\n self.assertFalse(status)",
"def test_08_verify_tar03(self, mock_isfile, mock_call, mock_msg):\n mock_msg.level = 0\n mock_isfile.return_value = True\n mock_call.return_value = 1\n status = udocker.FileUtil(\"tarball.tar\").verify_tar()\n self.assertFalse(status)",
"def test_create_SHA_256_hash_of_file_matches_cosmic_build_tool(\n file_name, expected_hash\n):\n file_path = str(Path(__file__).parent.parent / \"steps/component1\" / file_name)\n hash = utils.create_SHA_256_hash_of_file(file_path)\n\n assert hash == expected_hash",
"def test_self_write(self):\n self.assertFalse(os.path.exists(self.f1))\n self.assertFalse(os.path.exists(self.f2))\n self.sync.pickle_write()\n self.assertTrue(os.path.exists(self.f1))\n self.assertTrue(os.path.exists(self.f2))",
"def test_changing_defaults_doesnt_autocommit_to_file():\n state = StateFile()\n state.coolkey = StateAttr(state_file=state,\n key_name=\"cool_key\",\n default=3)\n with pytest.raises(StateNotAcquiredError):\n state.coolkey.read()\n\n with state:\n assert state.coolkey.read() == 3\n\n # Now create a new statefile with a DIFFERENT default, and make sure that\n # didn't get written to the file\n state = StateFile()\n state.coolkey = StateAttr(state_file=state,\n key_name=\"cool_key\",\n default=420)\n with state:\n assert state.coolkey.read() == 3",
"def test_record_create_files(db, location):\n record = CernSearchRecord.create({\"title\": \"test\"})\n\n assert 0 == len(record.files)\n assert 0 == len(record.files_content)\n\n file_content = b\"Hello world!\"\n record.files[\"hello.txt\"] = BytesIO(file_content)\n db.session.commit()\n\n record = CernSearchRecord.get_record(record.id)\n\n assert record[\"_bucket\"] == record.bucket_id\n assert record[\"_files\"]\n\n file_1 = record.files[\"hello.txt\"]\n assert \"hello.txt\" == file_1[\"key\"]\n assert 1 == len(record.files)\n assert 1 == len(record[\"_files\"])\n assert 0 == len(record.files_content)\n\n storage = file_1.obj.file.storage() # type: FileStorage\n fp = storage.open(mode=READ_MODE_BINARY)\n\n try:\n assert file_content == fp.read()\n finally:\n fp.close()",
"def test_write(self):\n with TemporaryDirectoryChanger():\n nhflux.NhfluxStreamVariant.writeBinary(self.nhf, \"NHFLUX2\")\n with open(SIMPLE_HEXZ_NHFLUX_VARIANT, \"rb\") as f1, open(\n \"NHFLUX2\", \"rb\"\n ) as f2:\n expectedData = f1.read()\n actualData = f2.read()\n for expected, actual in zip(expectedData, actualData):\n self.assertEqual(expected, actual)",
"def test_file_collection():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.make_job_file(STATUS_DIR, 'generation', 'test1', TEST_1_ATTRS_1)\n Status.make_job_file(STATUS_DIR, 'generation', 'test2', TEST_2_ATTRS_1)\n\n Status.update(STATUS_DIR)\n with open(os.path.join(STATUS_DIR, 'rev_status.json'), 'r') as f:\n data = json.load(f)\n assert str(TEST_1_ATTRS_1) in str(data)\n assert str(TEST_2_ATTRS_1) in str(data)",
"def test_07_verify_tar02(self, mock_isfile, mock_call, mock_msg):\n mock_msg.level = 0\n mock_isfile.return_value = True\n mock_call.return_value = 0\n status = udocker.FileUtil(\"tarball.tar\").verify_tar()\n self.assertTrue(status)",
"def test_get_file_binary_content(self):\n content = image_helper.get_file_binary_content(self.subject)\n\n self.assertGreater(len(content), 0)\n\n with open(self.subject, \"rb\") as f:\n original_content = f.read()\n\n self.assertEqual(content, original_content)"
] | [
"0.7036948",
"0.6847068",
"0.6809927",
"0.66636294",
"0.6653442",
"0.66411066",
"0.6630125",
"0.64702857",
"0.63453937",
"0.6294835",
"0.6271192",
"0.62590516",
"0.62253207",
"0.62044054",
"0.6157578",
"0.6143613",
"0.61369556",
"0.6123887",
"0.6103147",
"0.60813063",
"0.6081261",
"0.60760576",
"0.6072544",
"0.60710937",
"0.6057193",
"0.60559446",
"0.60458297",
"0.60448354",
"0.6035752",
"0.6026188"
] | 0.7719376 | 0 |
Test file user/group after setting setuid or setgid, because Python's os.chown() resets the setuid/setgid to 0. | def test_owner_after_setuid(file, modules, tmp_path, state_file_account):
    # Desired configuration.
    desired_file = tmp_path / "file_with_setuid"
    mode = "4750"
    # Run the state.
    ret = file.managed(
        name=str(desired_file),
        user=state_file_account.username,
        group=state_file_account.group.name,
        mode=mode,
    )
    assert ret.result is True
    # Check result.
    user_check = modules.file.get_user(str(desired_file))
    assert user_check == state_file_account.username
    group_check = modules.file.get_group(str(desired_file))
    assert group_check == state_file_account.group.name
    mode_check = modules.file.get_mode(str(desired_file))
    assert salt.utils.files.normalize_mode(mode_check) == mode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_file_owner(host, fqpath, user):\n command = \"chown %s %s\" % (user, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chown failed: %s' % rerr)\n return False",
"def testChAttrs(self):\n def _check(results):\n self.flushLoggedErrors()\n self.assertTrue(results[0].startswith(b'-rw-r--r--'))\n self.assertEqual(results[1], b'')\n self.assertTrue(results[2].startswith(b'----------'), results[2])\n self.assertEqual(results[3], b'')\n\n d = self.runScript('ls -l testfile1', 'chmod 0 testfile1',\n 'ls -l testfile1', 'chmod 644 testfile1')\n return d.addCallback(_check)\n # XXX test chgrp/own",
"def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_6_1_3_etc_group_mode(host):\n assert host.file(ETC_PASSWD_DASH).mode == 0o644",
"def match_owner_group(dest_path, source_path):\n source_stat = os.stat(source_path)\n return os.chown(dest_path, source_stat[stat.ST_UID], source_stat[stat.ST_GID])",
"def test_lock_checks_user(tmpdir):\n uid = getuid()\n if uid not in group_ids():\n pytest.skip(\"user has no group with gid == uid\")\n\n # self-owned, own group\n tmpdir.chown(uid, uid)\n\n # safe\n path = str(tmpdir)\n tmpdir.chmod(0o744)\n lk.check_lock_safety(path)\n\n # safe\n tmpdir.chmod(0o774)\n lk.check_lock_safety(path)\n\n # unsafe\n tmpdir.chmod(0o777)\n with pytest.raises(spack.error.SpackError):\n lk.check_lock_safety(path)\n\n # safe\n tmpdir.chmod(0o474)\n lk.check_lock_safety(path)\n\n # safe\n tmpdir.chmod(0o477)\n lk.check_lock_safety(path)",
"def test_6_1_4_etc_group_mode(host):\n assert host.file(ETC_GROUP).mode == 0o644",
"def _setup_permissions(self, chown, chmod):\n if chown is not None:\n if isinstance(chown, str):\n user, group = chown, None\n\n else:\n try:\n # Try to extract tuple.\n user, group = chown\n\n except ValueError:\n # If length of iterable is not 2, then allow 1.\n assert len(chown) == 1, 'chown must be user or tuple'\n user, group = chown[0], None\n\n except TypeError:\n # If not iterable, use given value as user.\n user, group = chown, None\n\n # Lookup user id.\n if isinstance(user, str):\n user_info = pwd.getpwnam(user)\n user = user_info.pw_uid\n\n # Lookup group id, or use -1 (do not change group)\n if isinstance(group, str):\n group = grp.getgrnam(group).pw_gid\n\n elif group is None:\n group = -1\n\n # Return tuple usable by os.chown().\n chown = (user, group)\n\n # Ensure chmod is numeric if given.\n if chmod is not None:\n assert isinstance(chmod, numbers.Number), 'chmod must be a number'\n\n return chown, chmod",
"def pid_permissions():\n config = Config()\n try:\n user = pwd.getpwnam(config.user)\n group = grp.getgrnam(config.group)\n os.chown(config.pidfile, user.pw_uid, group.gr_gid)\n except (KeyError, PermissionError):\n logger.error(\"Unable to change pidfile ownership permissions.\")\n raise SystemExit(os.EX_USAGE)",
"def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0o066)\n self.addCleanup(log1.close)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEqual(mode, 0o444)\n else:\n self.assertEqual(mode, 0o066)",
"def check_sane(self):\n st = os.stat(self.path)\n if st.st_uid != os.getuid():\n raise Exception('Auth dir %s not owned by user %d.' % (\n self.path, os.getuid()))\n # Mode 16832 is equal to (stat.S_IFDIR | stat.S_IRWXU)\n # In other words, a directory with mode bits rwx------\n if st.st_mode != 16832:\n raise Exception('Auth dir %s not a dir or wrong permissions.' % self.path)",
"def test_6_1_3_etc_group_user(host):\n assert host.file(ETC_PASSWD_DASH).user == 'root'",
"def test_6_1_4_etc_group_user(host):\n assert host.file(ETC_GROUP).user == 'root'",
"def test_lock_checks_group(tmpdir):\n uid = getuid()\n gid = next((g for g in group_ids() if g != uid), None)\n if not gid:\n pytest.skip(\"user has no group with gid != uid\")\n\n # self-owned, another group\n tmpdir.chown(uid, gid)\n\n # safe\n path = str(tmpdir)\n tmpdir.chmod(0o744)\n lk.check_lock_safety(path)\n\n # unsafe\n tmpdir.chmod(0o774)\n with pytest.raises(spack.error.SpackError):\n lk.check_lock_safety(path)\n\n # unsafe\n tmpdir.chmod(0o777)\n with pytest.raises(spack.error.SpackError):\n lk.check_lock_safety(path)\n\n # safe\n tmpdir.chmod(0o474)\n lk.check_lock_safety(path)\n\n # safe\n tmpdir.chmod(0o477)\n lk.check_lock_safety(path)",
"def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success",
"def set_file_owner_perm(path, permission, user, group):\n uid = pwd.getpwnam(user).pw_uid\n gid = grp.getgrnam(group).gr_gid\n\n current_perm = get_permissions(path)\n try:\n logger.debug('Current permission: {0}, changing to {1}'.format(current_perm, oct(permission)))\n os.chmod(path, permission)\n os.chown(path, uid, gid)\n except Exception as e:\n logger.warning('Unable to change permissions on {0}: {1}'.format(path, e))",
"def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):\n return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )",
"def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0066)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEquals(mode, 0444)\n else:\n self.assertEquals(mode, 0066)",
"def test_6_1_6_etc_shadow_group(host):\n assert host.file(ETC_SHADOW).group == 'root'",
"def test_6_1_2_etc_passwd_mode(host):\n assert host.file(ETC_PASSWD).mode == 0o644",
"def test_6_1_6_etc_shadow_user(host):\n assert host.file(ETC_SHADOW).user == 'root'",
"def chmod_chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )",
"def test_6_1_8_etc_gshadow_user(host):\n assert host.file(ETC_GSHADOW).user == 'root'",
"def chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )",
"def chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )",
"def chown(self, user, group, rec=0):\n uid = getuserid(user)\n gid = getgroupid(group)\n if rec:\n for x in self.visit(rec=lambda x: x.check(link=0)):\n if x.check(link=0):\n error.checked_call(os.chown, str(x), uid, gid)\n error.checked_call(os.chown, str(self), uid, gid)",
"def chgrp_file(filename, group, sudo=True):\n LOG.info(\"Changing file permissions for {}\".format(filename))\n cmd = \"chgrp {} {}\".format(group, filename)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)",
"def chown_file ( self, fspath ):\n return",
"def test_permissions(self):\n self.assertEqual(dir_perm, 0o2750)\n self.assertEqual(file_perm, 0o0440)",
"def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass"
] | [
"0.72370446",
"0.7138705",
"0.70169157",
"0.67163527",
"0.6711687",
"0.66944087",
"0.6687051",
"0.6519431",
"0.6500234",
"0.64717335",
"0.64700246",
"0.64388907",
"0.643171",
"0.63576084",
"0.63489926",
"0.6337333",
"0.6331592",
"0.6297125",
"0.6230173",
"0.6204743",
"0.6187353",
"0.6133163",
"0.61240196",
"0.61221457",
"0.6104958",
"0.60876614",
"0.60731286",
"0.60694253",
"0.6065526",
"0.6047356"
] | 0.7603757 | 0 |
Test a remote file with no hash | def test_file_managed_http_source_no_hash(file, tmp_path, remote_grail_scene33):
    name = str(tmp_path / "testfile")
    ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=False)
    # This should fail because no hash was provided
    assert ret.result is False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)\n assert ret.result is True",
"def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_raw_file_url_error(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_uncached,\n op=kgb.SpyOpReturn(b'first'))\n\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'first')\n\n # Ensure output of fake result matches.\n repository._get_file_uncached.unspy()\n self.spy_on(repository._get_file_uncached,\n op=kgb.SpyOpReturn(b'second'))\n\n # Grab from cache when no changes and change fake result to confirm\n # it is not called.\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'first')\n\n # When raw_file_url changed, do not grab from cache and ensure output\n # equals second fake value.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'second')",
"def check_remote_file_exists(url, login=None, password=None):\r\n credentials = None\r\n if login and password:\r\n credentials = login, password\r\n\r\n response = requests.get(url,\r\n stream=True,\r\n verify=False,\r\n auth=credentials)\r\n if response.status_code >= 400 or response.status_code < 200:\r\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\r\n\r\n response.close()",
"def test_get_file_exists_caching_with_raw_url(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_exists_uncached,\n op=kgb.SpyOpReturn(True))\n\n # Use spy to put key into cache\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Remove spy to ensure key is still in cache without needing spy\n repository._get_file_exists_uncached.unspy()\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Does not exist when raw_file_url changed because it is not cached.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertFalse(repository.get_file_exists('PATH', 'd7e96b3'))",
"def test_get_file_with_remote_and_short_SHA1_error(self):\n with self.assertRaises(ShortSHA1Error):\n self.remote_tool.get_file('README', 'd7e96b3')",
"def test_file_managed_keep_source_false_http(\n file, tmp_path, remote_grail_scene33, modules\n):\n name = str(tmp_path / \"testfile\")\n # Run the state\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n keep_source=False,\n )\n assert ret.result is True\n\n # Now make sure that the file is not cached\n ret = modules.cp.is_cached(remote_grail_scene33.url)\n assert not ret, \"File is still cached at {}\".format(ret)",
"def test_nonexistant_file(self):\n # Send the request\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/nofilewiththisnameright.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"close\")\n self.client_socket.send(str(request).encode())\n\n # Test response\n message = self.client_socket.recv(1024)\n response = self.parser.parse_response(message)\n self.assertEqual(response.code, 404)\n self.assertEqual(response.body, \"404 \" + webhttp.consts.REASON_DICT[404])",
"def test_file_managed_http_source(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n skip_verify=False,\n )\n assert ret.result is True",
"def test_download_file_no_sha(token):\n\n # github => repo => release => asset_list => asset => url => download\n\n g_h = github.Github(token, per_page=100)\n repo = g_h.get_repo(TEST_SLUG, lazy=False)\n release = repo.get_release(TEST_TAG)\n asset_list = release.get_assets()\n sha_filename = Template(Arguments.HASH_FILE).safe_substitute({\n 'platform': platform.system().lower()\n })\n\n pass_test = True\n\n for check_asset in asset_list:\n # look through list of assets for uploaded file and sha file\n\n if check_asset.name == sha_filename:\n\n pass_test = False\n\n assert pass_test",
"def test_get_path_not_exist(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n expected = None\n actual = Hash(self.file).get()\n self.assertEqual(expected, actual)",
"def test_unpack_http_url_bad_downloaded_checksum(mock_unpack_file):\n base_url = 'http://www.example.com/somepackage.tgz'\n contents = b'downloaded'\n download_hash = hashlib.new('sha1', contents)\n link = Link(base_url + '#sha1=' + download_hash.hexdigest())\n\n session = Mock()\n session.get = Mock()\n response = session.get.return_value = MockResponse(contents)\n response.headers = {'content-type': 'application/x-tar'}\n response.url = base_url\n\n download_dir = mkdtemp()\n try:\n downloaded_file = os.path.join(download_dir, 'somepackage.tgz')\n create_file(downloaded_file, 'some contents')\n\n unpack_http_url(\n link,\n 'location',\n download_dir=download_dir,\n session=session,\n hashes=Hashes({'sha1': [download_hash.hexdigest()]})\n )\n\n # despite existence of downloaded file with bad hash, downloaded again\n session.get.assert_called_once_with(\n 'http://www.example.com/somepackage.tgz',\n headers={\"Accept-Encoding\": \"identity\"},\n stream=True,\n )\n # cached file is replaced with newly downloaded file\n with open(downloaded_file) as fh:\n assert fh.read() == 'downloaded'\n\n finally:\n rmtree(download_dir)",
"def check_ssh_file(filename, ssh):\n stdin, stdout, stderr = ssh.exec_command(\"[ ! -f %s ] && echo \\\"0\\\"\" % filename)\n output = stdout.readlines()\n if len(output) == 0:\n return True\n else:\n return False",
"def file_exist(file_url):\n try:\n response = requests.head(file_url)\n if 200 <= response.status_code < 300:\n return True\n return False\n except ConnectionError:\n return False",
"def test_unfetchable_url(self):\r\n url = u'file://test.html'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)",
"def test_files(host, f):\n assert host.file(f).exists",
"def test_retrieve_files_single(self):\n os.makedirs('/tmp/remote_pacha/localhost/another_dir')\n os.makedirs('/tmp/remote_pacha/localhost/single_dir')\n remote_file = open('/tmp/remote_pacha/localhost/single_dir/remote.txt', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/single_dir/remote.txt'))\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha',\n directory='single_dir')\n run.retrieve_files()\n result = os.path.isfile('/tmp/localhost/single_dir/remote.txt')\n line = open('/tmp/localhost/single_dir/remote.txt')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote file\")\n self.assertTrue(result)",
"def test_read_with_file_id_that_doesnt_exist(self):\n\n url = reverse('file', kwargs={'file_id': '3efcd977-184d-4124-acb4-d5c50cdffc79'})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def remote_assert_empty(path):\n path = normpath(path)\n try:\n remote = get_remote(path)\n except ValueError: # Nothing exists at path, nothing to worry about.\n return\n raise ValueError(\"Something exists at %s\" % remote.uri)",
"def remote_file(request, running_cluster):\n file_path = '/tmp/remote_dummy_file_for_testing'\n p = subprocess.run([\n 'flintrock', 'run-command', running_cluster, '--',\n 'echo -e \"{data}\" > {path}'.format(\n data='test\\n' * 3,\n path=file_path)])\n assert p.returncode == 0\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'run-command', running_cluster, '--',\n 'rm', '-f', file_path])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return file_path",
"def test_importfile_valid_remotepath_valid_localpath(self):\n\n # create a temporary file\n handle,remotepath = tempfile.mkstemp()\n indata = \"hubcheck\\ntool session shell test\\n%s\" % (remotepath)\n os.write(handle,indata)\n os.close(handle)\n\n # perform the transfer\n localpath,es = self.shell.execute('echo ${PWD}/$RANDOM.tmp')\n size = self.shell.importfile(remotepath,localpath)\n\n outdata = self.shell.read_file(localpath)\n\n # clean up the files\n self.shell.execute(\"rm -f %s\" % (localpath))\n os.remove(remotepath)\n\n # check the transfer\n self.assertTrue(size == len(indata),\n \"size mismatch: wrote %s bytes, expected %s bytes\" \\\n % (size,len(indata)))\n\n self.assertTrue(indata == outdata,\n \"file data mismatch: wrote '%s', expected '%s'\" \\\n % (repr(outdata),repr(indata)))",
"def test_retrieve_files_all(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)",
"def test_download_nonexistent(client: FlaskClient):\n response = util.download_file(client, DEFAULT_USER, \"test-nonexistent\")\n assert response.status == \"404 NOT FOUND\"",
"def test_fileDoesNotExist(self):\n fp = FilePath(self.mktemp())\n protocol = self.makeConnectedDccFileReceive(fp.path)\n\n self.allDataReceivedForProtocol(protocol, b\"I <3 Twisted\")\n\n self.assertEqual(fp.getContent(), b\"I <3 Twisted\")",
"def test_no_io_on_url():\n file = get_image_cache_file()\n file.url\n assert not file.storage.exists.called\n assert not file.storage.open.called",
"def test_managed_local_source_with_source_hash(\n file, tmp_path, grail_scene33_file, grail_scene33_file_hash, proto, dest_file_exists\n):\n name = tmp_path / \"local_source_with_source_hash\"\n\n if dest_file_exists:\n name.touch()\n\n # Test with wrong hash\n bad_hash = grail_scene33_file_hash[::-1]\n\n ret = file.managed(\n name=str(name),\n source=proto + str(grail_scene33_file),\n source_hash=\"sha256={}\".format(bad_hash),\n )\n assert ret.result is False\n assert not ret.changes\n assert \"does not match actual checksum\" in ret.comment\n\n # Now with the right hash\n ret = file.managed(\n name=str(name),\n source=proto + str(grail_scene33_file),\n source_hash=\"sha256={}\".format(grail_scene33_file_hash),\n )\n assert ret.result is True",
"def test_retrieve_files_error_message(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmpp/remote_pacha')\n sys.stdout = MockSys()\n sys.exit = MockSys()\n run.retrieve_files()\n actual = sys.stdout.captured()\n expected = \"\"\"\nPacha was not able to retrieve the files from the SSH server provided.\nCheck your configuration file settings and try again.\n\"\"\"\n self.assertEqual(actual, expected)",
"def test_send_empty_file():\n\n # Generate the blocks for the empty test file which is not already present on the server\n empty_file = os.path.join(os.path.dirname(__file__), \"../test_files/empty.txt\")\n # Ask the server for the hash of the last block\n response = client.get(\"/latest_block_hash\")\n last_block_hash = response.json()[\"last_block_hash\"]\n block = generate_blocks(empty_file, last_block_hash)\n # Encode the generated block into a binary file using pickle\n block_pickled = pickle.dumps(block)\n # Send the encoded block to the test server\n response = client.post(\"/send\",\n files={\"file\": block_pickled})\n\n assert response.ok\n assert response.json() \\\n == {\"success\": True,\n \"new_file\": True,\n \"hash\": \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\n \"index_all\": 1}\n\n # Send the SHA256 checksum of the empty file to the server to be checked\n response = client.get(\"/check\",\n params={\"file_hash\": block[0].hash,\n \"index_all\": block[0].index_all})\n assert response.ok\n assert response.json() \\\n == {\"check\": True,\n \"hash\": \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\"}",
"def get_rmt_file(uri, creds, sFile):\n\n import urllib\n try:\n urllib.urlretrieve(uri, sFile)\n return True\n\n except:\n return False",
"def test_existing_file(self):\n # Send the request\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/index.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"close\")\n self.client_socket.send(str(request).encode())\n\n #Get the resource to compare\n wantedres = webhttp.resource.Resource(\"/test/index.html\")\n \n # Test response\n message = self.client_socket.recv(1024)\n response = self.parser.parse_response(message)\n self.assertEqual(response.code, 200)\n self.assertEqual(response.body, wantedres.get_content())"
] | [
"0.72236395",
"0.7172954",
"0.71605164",
"0.6851394",
"0.6806955",
"0.67788607",
"0.6724458",
"0.67171365",
"0.66721904",
"0.6553629",
"0.6403871",
"0.64003515",
"0.6344764",
"0.63181955",
"0.63075525",
"0.6287276",
"0.6229529",
"0.6194949",
"0.61841774",
"0.61640656",
"0.6149345",
"0.6126047",
"0.6107422",
"0.6101808",
"0.60977995",
"0.6089418",
"0.60859025",
"0.6069404",
"0.6038248",
"0.60290843"
] | 0.77850294 | 0 |
Test a remote file using skip_verify | def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):
    name = str(tmp_path / "testfile")
    ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)
    assert ret.result is True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_file_managed_http_source_no_hash(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=False)\n # This should fail because no hash was provided\n assert ret.result is False",
"def test_file_managed_keep_source_false_http(\n file, tmp_path, remote_grail_scene33, modules\n):\n name = str(tmp_path / \"testfile\")\n # Run the state\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n keep_source=False,\n )\n assert ret.result is True\n\n # Now make sure that the file is not cached\n ret = modules.cp.is_cached(remote_grail_scene33.url)\n assert not ret, \"File is still cached at {}\".format(ret)",
"def test_file_managed_http_source(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n skip_verify=False,\n )\n assert ret.result is True",
"def check_remote_file_exists(url, login=None, password=None):\r\n credentials = None\r\n if login and password:\r\n credentials = login, password\r\n\r\n response = requests.get(url,\r\n stream=True,\r\n verify=False,\r\n auth=credentials)\r\n if response.status_code >= 400 or response.status_code < 200:\r\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\r\n\r\n response.close()",
"def test_retrieve_files_error_message(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmpp/remote_pacha')\n sys.stdout = MockSys()\n sys.exit = MockSys()\n run.retrieve_files()\n actual = sys.stdout.captured()\n expected = \"\"\"\nPacha was not able to retrieve the files from the SSH server provided.\nCheck your configuration file settings and try again.\n\"\"\"\n self.assertEqual(actual, expected)",
"def test_download_host(self):\n pass",
"def test_raw_file_url_error(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_uncached,\n op=kgb.SpyOpReturn(b'first'))\n\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'first')\n\n # Ensure output of fake result matches.\n repository._get_file_uncached.unspy()\n self.spy_on(repository._get_file_uncached,\n op=kgb.SpyOpReturn(b'second'))\n\n # Grab from cache when no changes and change fake result to confirm\n # it is not called.\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'first')\n\n # When raw_file_url changed, do not grab from cache and ensure output\n # equals second fake value.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'second')",
"def test_verify_ssl_https_source(file, tmp_path, ssl_webserver, verify_ssl):\n name = tmp_path / \"test_verify_ssl_true.txt\"\n source = ssl_webserver.url(\"this.txt\")\n source_hash = f\"{source}.sha256\"\n\n ret = file.managed(\n str(name),\n source=source,\n source_hash=source_hash,\n verify_ssl=verify_ssl,\n skip_verify=False,\n )\n if verify_ssl is True:\n assert ret.result is False\n assert \"SSL: CERTIFICATE_VERIFY_FAILED\" in ret.comment\n assert not name.exists()\n else:\n if IS_WINDOWS and not os.environ.get(\"GITHUB_ACTIONS_PIPELINE\"):\n pytest.xfail(\n \"This test fails when running from Jenkins but not on the GitHub \"\n \"Actions Pipeline\"\n )\n assert ret.result is True\n assert ret.changes\n # mode, if present is not important for this test\n ret.changes.pop(\"mode\", None)\n assert ret.changes == {\"diff\": \"New file\"}\n assert name.exists()",
"def test_retrieve_files_single(self):\n os.makedirs('/tmp/remote_pacha/localhost/another_dir')\n os.makedirs('/tmp/remote_pacha/localhost/single_dir')\n remote_file = open('/tmp/remote_pacha/localhost/single_dir/remote.txt', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/single_dir/remote.txt'))\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha',\n directory='single_dir')\n run.retrieve_files()\n result = os.path.isfile('/tmp/localhost/single_dir/remote.txt')\n line = open('/tmp/localhost/single_dir/remote.txt')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote file\")\n self.assertTrue(result)",
"def test_retrieve_files_all(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)",
"def test_nonexistant_file(self):\n # Send the request\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/nofilewiththisnameright.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"close\")\n self.client_socket.send(str(request).encode())\n\n # Test response\n message = self.client_socket.recv(1024)\n response = self.parser.parse_response(message)\n self.assertEqual(response.code, 404)\n self.assertEqual(response.body, \"404 \" + webhttp.consts.REASON_DICT[404])",
"def test_retrieve_files_with_pre_hook(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n os.makedirs('/tmp/remote_pacha/localhost/pacha_pre')\n touch_script = open('/tmp/remote_pacha/localhost/pacha_pre/foo.sh', 'w')\n touch_script.write('''touch /tmp/remote_pacha/localhost/pre_got_executed.txt''')\n touch_script.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/pre_got_executed.txt'))",
"def test_download_nonexistent(client: FlaskClient):\n response = util.download_file(client, DEFAULT_USER, \"test-nonexistent\")\n assert response.status == \"404 NOT FOUND\"",
"def test_retrieve_files_move_existing_file(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n os.mkdir('/tmp/localhost')\n\n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n result_3 = os.path.isdir('/tmp/localhost.%s' % strftime('%H%M%s'))\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_3)\n self.assertTrue(result_2)\n self.assertTrue(result_1)",
"def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))",
"def test_files(host, f):\n assert host.file(f).exists",
"def test_get_file_with_remote_and_short_SHA1_error(self):\n with self.assertRaises(ShortSHA1Error):\n self.remote_tool.get_file('README', 'd7e96b3')",
"def test_vmcp_file_not_found(self):\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err",
"def test_load_verify_invalid_file(self, tmpfile):\n clientContext = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n clientContext.load_verify_locations(tmpfile)",
"def remote_file(request, running_cluster):\n file_path = '/tmp/remote_dummy_file_for_testing'\n p = subprocess.run([\n 'flintrock', 'run-command', running_cluster, '--',\n 'echo -e \"{data}\" > {path}'.format(\n data='test\\n' * 3,\n path=file_path)])\n assert p.returncode == 0\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'run-command', running_cluster, '--',\n 'rm', '-f', file_path])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return file_path",
"def test_check_negative():\n\n # Generate the blocks for the test file which is not present on the server\n test_file = os.path.join(os.path.dirname(__file__),\n \"../test_files/debashis-rc-biswas-3U4gGsGNsMY-unsplash.jpg\")\n # Ask the server for the hash of the last block\n response = client.get(\"/latest_block_hash\")\n last_block_hash = response.json()[\"last_block_hash\"]\n blocks = generate_blocks(test_file, last_block_hash)\n\n # Send the SHA256 checksum of the file to the server to be checked\n response = client.get(\"/check\",\n params={\"file_hash\": blocks[0].hash,\n \"index_all\": blocks[0].index_all})\n assert response.ok\n assert response.json() \\\n == {\"check\": False,\n \"hash\": \"415d4f66e1b8b9083014dcdca5ddd7d1dcca3f5a4a120603169b951b1c5fa0c9\"}",
"def test_download_file_no_sha(token):\n\n # github => repo => release => asset_list => asset => url => download\n\n g_h = github.Github(token, per_page=100)\n repo = g_h.get_repo(TEST_SLUG, lazy=False)\n release = repo.get_release(TEST_TAG)\n asset_list = release.get_assets()\n sha_filename = Template(Arguments.HASH_FILE).safe_substitute({\n 'platform': platform.system().lower()\n })\n\n pass_test = True\n\n for check_asset in asset_list:\n # look through list of assets for uploaded file and sha file\n\n if check_asset.name == sha_filename:\n\n pass_test = False\n\n assert pass_test",
"def test_08_verify_tar03(self, mock_isfile, mock_call, mock_msg):\n mock_msg.level = 0\n mock_isfile.return_value = True\n mock_call.return_value = 1\n status = udocker.FileUtil(\"tarball.tar\").verify_tar()\n self.assertFalse(status)",
"def test_download(self):\n pass",
"def test_get_file_exists_caching_with_raw_url(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_exists_uncached,\n op=kgb.SpyOpReturn(True))\n\n # Use spy to put key into cache\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Remove spy to ensure key is still in cache without needing spy\n repository._get_file_exists_uncached.unspy()\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Does not exist when raw_file_url changed because it is not cached.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertFalse(repository.get_file_exists('PATH', 'd7e96b3'))",
"def test_fileDoesNotExist(self):\n fp = FilePath(self.mktemp())\n protocol = self.makeConnectedDccFileReceive(fp.path)\n\n self.allDataReceivedForProtocol(protocol, b\"I <3 Twisted\")\n\n self.assertEqual(fp.getContent(), b\"I <3 Twisted\")",
"def test_file_download_fail(self):\n with mock.patch(\"JumpScale.j\") as j_mock:\n from JumpScale import j\n import JumpScale.tools.cuisine.CuisineCore\n JumpScale.tools.cuisine.CuisineCore.j = j\n from JumpScale.tools.cuisine.CuisineCore import CuisineCore\n from JumpScale.core.errorhandling import JSExceptions\n executor_mock = mock.MagicMock()\n j.tools.executor.getLocal.return_value = executor_mock\n executor = j.tools.executor.getLocal()\n cuisine = j.tools.cuisine.local\n cuisine_core = CuisineCore(executor, cuisine)\n url = 'http://hallo.com/downloadme.txt'\n to = '/tmp/path'\n cuisine_core.file_exists = mock.MagicMock()\n cuisine_core.file_exists.return_value = False\n cuisine_core.createDir = mock.MagicMock()\n cuisine_core.file_unlink = mock.MagicMock()\n cuisine_core.run = mock.MagicMock()\n cuisine_core.run.side_effect = [(32, '', 'err'), (0, 'Ok', '')]\n cuisine_core.touch = mock.MagicMock()\n j.exceptions.RuntimeError = JSExceptions.RuntimeError\n self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_download, url, to)",
"async def test_reading_non_exitisting_certificate_file() -> None:\n assert (\n mqtt.util.migrate_certificate_file_to_content(\"/home/file_not_exists\") is None\n )",
"def test_read_with_file_id_that_doesnt_exist(self):\n\n url = reverse('file', kwargs={'file_id': '3efcd977-184d-4124-acb4-d5c50cdffc79'})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)"
] | [
"0.7251301",
"0.7052198",
"0.6622951",
"0.6618357",
"0.6608947",
"0.6568611",
"0.6540163",
"0.6483996",
"0.6450933",
"0.6418295",
"0.64047265",
"0.6379879",
"0.6374588",
"0.6258586",
"0.62566084",
"0.6240293",
"0.6215035",
"0.62099105",
"0.6209236",
"0.6159993",
"0.615858",
"0.61013544",
"0.61012685",
"0.60563254",
"0.60541266",
"0.6031716",
"0.6017447",
"0.6015486",
"0.5974994",
"0.5968849"
] | 0.78441286 | 0 |
test verify_ssl when its False and True when managing a file with an https source and skip_verify is false. | def test_verify_ssl_https_source(file, tmp_path, ssl_webserver, verify_ssl):
name = tmp_path / "test_verify_ssl_true.txt"
source = ssl_webserver.url("this.txt")
source_hash = f"{source}.sha256"
ret = file.managed(
str(name),
source=source,
source_hash=source_hash,
verify_ssl=verify_ssl,
skip_verify=False,
)
if verify_ssl is True:
assert ret.result is False
assert "SSL: CERTIFICATE_VERIFY_FAILED" in ret.comment
assert not name.exists()
else:
if IS_WINDOWS and not os.environ.get("GITHUB_ACTIONS_PIPELINE"):
pytest.xfail(
"This test fails when running from Jenkins but not on the GitHub "
"Actions Pipeline"
)
assert ret.result is True
assert ret.changes
# mode, if present is not important for this test
ret.changes.pop("mode", None)
assert ret.changes == {"diff": "New file"}
assert name.exists() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ssl_default(self):\n assert security.security_settings.ssl_verify()",
"def test_ssl_default(self):\n e = ErrataConnector()\n assert e.ssl_verify",
"def test_use_certificate_file_missing(self, tmpfile):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n ctx.use_certificate_file(tmpfile)",
"def test_use_certificate_chain_file_missing_file(self, tmpfile):\n context = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n context.use_certificate_chain_file(tmpfile)",
"def test_load_verify_invalid_file(self, tmpfile):\n clientContext = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n clientContext.load_verify_locations(tmpfile)",
"def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)\n assert ret.result is True",
"def test_use_certificate_file_wrong_args(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.use_certificate_file(object(), FILETYPE_PEM)\n with pytest.raises(TypeError):\n ctx.use_certificate_file(b\"somefile\", object())\n with pytest.raises(TypeError):\n ctx.use_certificate_file(object(), FILETYPE_PEM)",
"def test_set_default_verify_paths(self):\n # Testing this requires a server with a certificate signed by one\n # of the CAs in the platform CA location. Getting one of those\n # costs money. Fortunately (or unfortunately, depending on your\n # perspective), it's easy to think of a public server on the\n # internet which has such a certificate. Connecting to the network\n # in a unit test is bad, but it's the only way I can think of to\n # really test this. -exarkun\n context = Context(SSLv23_METHOD)\n context.set_default_verify_paths()\n context.set_verify(\n VERIFY_PEER,\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n\n client = socket_any_family()\n client.connect((\"encrypted.google.com\", 443))\n clientSSL = Connection(context, client)\n clientSSL.set_connect_state()\n clientSSL.set_tlsext_host_name(b\"encrypted.google.com\")\n clientSSL.do_handshake()\n clientSSL.send(b\"GET / HTTP/1.0\\r\\n\\r\\n\")\n assert clientSSL.recv(1024)",
"def use_skip_ssl_verify(self, val=True, force=False):\n if val:\n self.ssl_verify = False\n else:\n self.ssl_verify = True\n\n if force:\n self.force_skip_ssl_verify = True\n else:\n self.force_skip_ssl_verify = False\n\n return val",
"def test_fallback_default_verify_paths(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n monkeypatch.setattr(\n _lib, \"SSL_CTX_set_default_verify_paths\", lambda x: 1\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_FILE\",\n _ffi.string(_lib.X509_get_default_cert_file()),\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_DIR\",\n _ffi.string(_lib.X509_get_default_cert_dir()),\n )\n context.set_default_verify_paths()\n store = context.get_cert_store()\n sk_obj = _lib.X509_STORE_get0_objects(store._store)\n assert sk_obj != _ffi.NULL\n num = _lib.sk_X509_OBJECT_num(sk_obj)\n assert num != 0",
"async def test_reading_non_exitisting_certificate_file() -> None:\n assert (\n mqtt.util.migrate_certificate_file_to_content(\"/home/file_not_exists\") is None\n )",
"def test_non_ssl_ports_after_enabling_tls(self):\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = True\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n try:\n rest._http_request(api=api, timeout=10)\n except Exception as _:\n ssl_request = self.sample_urls_map[non_ssl_request]\n api = ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} failed\".format(api))\n else:\n self.log.error(\"{0} worked\".format(api))\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} api failed with content {1}\".format(api, content))",
"def test_set_verify_default_callback(self, mode):\n serverContext = Context(TLSv1_2_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n\n clientContext = Context(TLSv1_2_METHOD)\n clientContext.set_verify(mode, None)\n\n if mode == SSL.VERIFY_PEER:\n with pytest.raises(Exception) as exc:\n self._handshake_test(serverContext, clientContext)\n assert \"certificate verify failed\" in str(exc.value)\n else:\n self._handshake_test(serverContext, clientContext)",
"def verify_SSL_certificate(self, code: str) -> bool:\n return True",
"def _use_certificate_file_test(self, certificate_file):\n # TODO\n # Hard to assert anything. But we could set a privatekey then ask\n # OpenSSL if the cert and key agree using check_privatekey. Then as\n # long as check_privatekey works right we're good...\n with open(certificate_file, \"wb\") as pem_file:\n pem_file.write(root_cert_pem)\n\n ctx = Context(SSLv23_METHOD)\n ctx.use_certificate_file(certificate_file)",
"def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def ssl_verification(verify: bool = True) -> Generator[None, None, None]:\n\n old_request = requests.Session.request\n requests.Session.request = partialmethod(old_request, verify=verify) # type: ignore\n\n warnings.filterwarnings(\"ignore\", \"Unverified HTTPS request\")\n yield\n warnings.resetwarnings()\n\n requests.Session.request = old_request # type: ignore",
"def test_host_ssl(self):\n url = create_url(host=\"www.example.com\", ssl=True, scheme_ssl=\"https\")\n self.assertEqual(url, \"https://www.example.com\")",
"def test_external_reference_https(self):\n assert self.search(\n LINK_PATTERN\n % (\n EXTERNAL_CLASS,\n EXTERNAL_REFERENCE_LINK_HTTPS,\n EXTERNAL_REFERENCE_TEXT_HTTPS,\n )\n )",
"def test_no_verify_no_ca(self, host_str_fs, tmpdir):\n tmpdir.join('cert.pem').ensure()\n tmpdir.join('key.pem').ensure()\n tmpdir.join('ca.pem').ensure()\n\n out = client_kwargs_from_config(\n host_str_fs.format(cert_path=tmpdir.strpath),\n )\n\n assert out['tls'].cert == (\n tmpdir.join('cert.pem').strpath,\n tmpdir.join('key.pem').strpath,\n )\n assert out['tls'].verify == False",
"def test_http_over_https_error(\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, ip_addr,\n tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n):\n # disable some flaky tests\n # https://github.com/cherrypy/cheroot/issues/225\n issue_225 = (\n IS_MACOS\n and adapter_type == 'builtin'\n )\n if issue_225:\n pytest.xfail('Test fails in Travis-CI')\n\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n\n tls_certificate.configure_cert(tls_adapter.context)\n\n interface, _host, port = _get_conn_data(ip_addr)\n tlshttpserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(\n tlshttpserver.bind_addr,\n )\n\n fqdn = interface\n if ip_addr is ANY_INTERFACE_IPV6:\n fqdn = '[{fqdn}]'.format(**locals())\n\n expect_fallback_response_over_plain_http = (\n (\n adapter_type == 'pyopenssl'\n )\n )\n if expect_fallback_response_over_plain_http:\n resp = requests.get(\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n assert resp.status_code == 400\n assert resp.text == (\n 'The client sent a plain HTTP request, '\n 'but this server only speaks HTTPS on this port.'\n )\n return\n\n with pytest.raises(requests.exceptions.ConnectionError) as ssl_err:\n requests.get( # FIXME: make stdlib ssl behave like PyOpenSSL\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n\n if IS_LINUX:\n expected_error_code, expected_error_text = (\n 104, 'Connection reset by peer',\n )\n if IS_MACOS:\n expected_error_code, expected_error_text = (\n 54, 'Connection reset by peer',\n )\n if IS_WINDOWS:\n expected_error_code, expected_error_text = (\n 10054,\n 'An existing connection was forcibly closed by the remote host',\n )\n\n underlying_error = ssl_err.value.args[0].args[-1]\n err_text = str(underlying_error)\n assert underlying_error.errno == expected_error_code, (\n 'The underlying error is {underlying_error!r}'.\n format(**locals())\n )\n assert expected_error_text in err_text",
"def _verification_needed(cacert, insecure):\n if insecure is False or insecure is None:\n verify = cacert or True\n else:\n verify = False\n return verify",
"def skip_or_run_ssl_password_test_call(self):\n\n return skip_or_run_test_tarantool_call(self, '2.11.0',\n 'does not support SSL passwords')",
"def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n monkeypatch.setattr(\n _lib, \"SSL_CTX_set_default_verify_paths\", lambda x: 1\n )\n dir_env_var = _ffi.string(_lib.X509_get_default_cert_dir_env()).decode(\n \"ascii\"\n )\n file_env_var = _ffi.string(\n _lib.X509_get_default_cert_file_env()\n ).decode(\"ascii\")\n monkeypatch.setenv(dir_env_var, \"value\")\n monkeypatch.setenv(file_env_var, \"value\")\n context.set_default_verify_paths()\n\n monkeypatch.setattr(\n context, \"_fallback_default_verify_paths\", raiser(SystemError)\n )\n context.set_default_verify_paths()",
"def initialize_ssl(self):\n self.ssl_context = ssl.SSLContext()\n # if self.config.get('ca_file', None):\n # self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])\n\n # TODO : Remove this\n\n verify_ssl = self.config[\"AUTH\"][\"verify_ssl\"]\n if isinstance(verify_ssl, str):\n verify_ssl = strtobool(verify_ssl)\n\n if not verify_ssl:\n self.ssl_context.verify_mode = ssl.CERT_NONE",
"def enable_ssl_verification(self) -> bool:\n return pulumi.get(self, \"enable_ssl_verification\")",
"def test_wrong_sni_hint(self) -> None:\n self.start_destination_server()\n self.start_proxy_server()\n\n sock = socket.create_connection(\n (self.proxy_server.host, self.proxy_server.port)\n )\n with self.client_context.wrap_socket(\n sock, server_hostname=\"localhost\"\n ) as proxy_sock:\n with pytest.raises(ssl.SSLCertVerificationError):\n SSLTransport(\n proxy_sock, self.client_context, server_hostname=\"veryverywrong\"\n )",
"def test_no_ca_no_error(self, tmpdir):\n tmpdir.join('cert.pem').ensure()\n tmpdir.join('key.pem').ensure()\n\n out = client_kwargs_from_config(\n 'http://l cert_path=%s' % tmpdir.strpath\n )\n\n assert out['tls'].cert == (\n tmpdir.join('cert.pem').strpath,\n tmpdir.join('key.pem').strpath,\n )\n assert out['tls'].verify == None",
"def test_fallback_path_is_not_file_or_dir(self):\n context = Context(SSLv23_METHOD)\n context._fallback_default_verify_paths([], [])\n context._fallback_default_verify_paths([\"/not/a/file\"], [\"/not/a/dir\"])",
"def skip_if_no_ssl (func):\n try:\n import evy.patched.ssl\n except ImportError:\n try:\n import evy.patched.OpenSSL\n except ImportError:\n skipped(func)"
] | [
"0.73158944",
"0.6783668",
"0.67815685",
"0.63655317",
"0.63652956",
"0.63621306",
"0.6331079",
"0.6180326",
"0.6170911",
"0.612429",
"0.6118464",
"0.6111109",
"0.6038211",
"0.6006704",
"0.59966445",
"0.59884673",
"0.59882975",
"0.5967463",
"0.595505",
"0.5917905",
"0.5915687",
"0.5903694",
"0.58972704",
"0.5878415",
"0.5864815",
"0.5856553",
"0.5846817",
"0.5843316",
"0.5826928",
"0.5816096"
] | 0.8198307 | 0 |
Given a series, series length, a, b, g, and number of preds, return a series of...smoothed numbers. | def triple_exponential_smoothing(series: [int], slen: int, alpha: float, beta: float, gamma: float, n_preds: int) -> [int]:
result = []
seasonals = initial_seasonal_components(series, slen)
for i in range(len(series)+n_preds):
if i == 0: # initial values
smooth = series[0]
trend = initial_trend(series, slen)
result.append(series[0])
continue
if i >= len(series): # we are forecasting
m = i - len(series) + 1
result.append((smooth + m*trend) + seasonals[i%slen])
else:
val = series[i]
last_smooth, smooth = smooth, alpha*(val-seasonals[i%slen]) + (1-alpha)*(smooth+trend)
trend = beta * (smooth-last_smooth) + (1-beta)*trend
seasonals[i%slen] = gamma*(val-smooth) + (1-gamma)*seasonals[i%slen]
result.append(smooth+trend+seasonals[i%slen])
# Took out pd.Series and pyplot.show()
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def smooth_series(y,p = 6.25):\n cycle, trend = sm.tsa.filters.hpfilter(y, p)\n return trend",
"def clean_series(y,smooth = False,p = 6.25,logsmooth = True):\n\n # Remove null values in the middle of the series using interpolate\n # First null values are not interpolated but later filled by 0.0\n y = y.replace(0.0,np.NaN).interpolate().fillna(0.0)\n\n # Smooth using Hodrick Prescott filter with parameter p\n if smooth:\n y = smooth_series(y,p)\n y.loc[(y < 1) & (y > 0)] = 1\n\n if logsmooth:\n y = y.map(lambda x : np.log(1+x))\n y = smooth_series(y,p)\n y = y.map(lambda x : np.exp(x) - 1)\n y.loc[(y < 1) & (y > 0)] = 1\n y.loc[y < 0] = 0\n\n return y",
"def _smooth(values, std):\n width = std * 4\n x = np.linspace(-width, width, min(2 * width + 1, len(values)))\n kernel = np.exp(-(x / 5)**2)\n\n values = np.array(values)\n weights = np.ones_like(values)\n\n smoothed_values = np.convolve(values, kernel, mode='same')\n smoothed_weights = np.convolve(weights, kernel, mode='same')\n\n return smoothed_values / smoothed_weights",
"def smoothed(sequence, step=1, start=0):\n next_index = start + 1\n last = len(sequence) \n new_sequence = []\n if not step:\n return sequence\n ratio_step = step + 1\n for item in sequence:\n new_sequence.append(item)\n if next_index < last:\n next_item = sequence[next_index]\n ratio = (item + next_item) / (step + 1)\n ratio = int(ratio)\n for x in range(step):\n value = (ratio * x) + item\n new_sequence.append(int(value))\n next_index = next_index + 1\n return new_sequence",
"def smooth_slowsignal(a, n=10):\n ret = np.cumsum(a, axis=0, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1 :] / n",
"def _lidstone_smooth(prob, smoothing, observations, outcomes):\n return (prob + smoothing) / (observations + (smoothing * outcomes))",
"def smoothed(values, qsize, average=mean):\n values = _normalize(values)\n if qsize < 1:\n raise ValueError(\"qsize must be 1 or larger\")\n q = []\n it = iter(values)\n try:\n for i in range(qsize):\n q.append(next(it))\n for i in cycle(range(qsize)):\n yield average(q)\n q[i] = next(it)\n except StopIteration:\n pass",
"def smooth(f, g):\r\n chi_f = f.apply(lambda x: 0.0 if pd.isna(x) else 1.0)\r\n f_ext = pd.concat([f, chi_f], axis=1).prod(axis=1)\r\n a = convolve(f_ext, g)\r\n b = convolve(chi_f, g)\r\n return a.div(b)",
"def single_exponential_smoothing(self, series, horizon, alpha=0.5):\n result = [0, series[0]]\n for i in range(1, len(series) + horizon - 1):\n if i >= len(series):\n result.append((series[-1] * alpha) + ((1-alpha) * result[i]))\n else:\n result.append((series[i] * alpha) + ((1-alpha) * result[i]))\n return result[len(series):len(series)+horizon]",
"def _smooth(x, window_len=11, window='hanning'):\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n \n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n \n if window_len<3:\n return x\n \n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise(ValueError,\n \"Window is not one of '{0}', '{1}', '{2}', '{3}', '{4}'\".format(\n *('flat', 'hanning', 'hamming', 'bartlett', 'blackman')))\n \n s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w = np.ones(window_len,'d')\n else:\n w = eval('np.' + window + '(window_len)')\n \n y = np.convolve(w / w.sum(), s, mode = 'valid')\n return y",
"def get_series(gval, series):\n minlen = min([len(d[series]) for f, d in gval])\n return np.stack([d[series][:minlen] for f, d in gval])",
"def smooth(x, window_len=11, window='hanning'):\n\n# if x.ndim != 1:\n# raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n# if x.size < window_len:\n# raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len<3:\n return x\n\n# if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n# raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]\n# print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y",
"def data_generate_process():\n\n a = 0.8\n b = 0.4\n c = 0.1\n d = 0.3\n e = 0.7\n y_0 = 0.0\n a_0 = 0.2\n sigma_0 = 0.35\n\n data_len = 10000\n y_series = pd.Series([np.nan] * data_len)\n a_series = pd.Series([np.nan] * data_len)\n sigma_series = pd.Series([np.nan] * data_len)\n\n epsilon_normal = np.random.normal(loc=0.0, scale=1.0, size=data_len)\n\n y_series[0] = y_0\n a_series[0] = a_0\n sigma_series[0] = sigma_0\n\n for idx in range(1, data_len):\n epsilon_t = epsilon_normal[idx]\n sigma_t = np.sqrt(c + d * a_series[idx - 1] ** 2 + e * sigma_series[idx - 1] ** 2)\n a_t = epsilon_t * sigma_t\n y_series[idx] = a * y_series[idx - 1] + b + a_t\n a_series[idx] = a_t\n sigma_series[idx] = sigma_t\n\n return y_series, a_series, sigma_series",
"def smooth(x,window_len=11,window='hanning'):\r\n\r\n if window_len<3:\r\n return x\r\n\r\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\r\n #print(len(s))\r\n if window == 'flat': #moving average\r\n w=np.ones(window_len,'d')\r\n else:\r\n w=eval('np.'+window+'(window_len)')\r\n\r\n y=np.convolve(w/w.sum(),s,mode='valid')\r\n return y[0:256]",
"def smooth_curve(points, factor=0.8):\n\n smoothed_points = []\n for point in points:\n if smoothed_points:\n previous = smoothed_points[-1]\n smoothed_points.append(previous * factor + point * (1 - factor))\n else:\n smoothed_points.append(point)\n return smoothed_points",
"def smooth(x, window_len=3, window='hanning'):\n s = np.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]\n w = getattr(np, window)(window_len)\n y = np.convolve(w/w.sum(), s, mode='same') \n return y[window_len-1:-window_len+1]",
"def smooth_curve(times, magnitudes):\n x = times[:,0]\n y = magnitudes[:,0]\n\n smoothed_times = np.linspace(np.min(x), np.max(x), 1000)\n\n itp = interp1d(x,y, kind='linear')\n window_size, poly_order = 101, 3\n smoothed_magnitudes = savgol_filter(itp(smoothed_times), window_size, poly_order)\n\n smoothed_times = smoothed_times.reshape(smoothed_times.size ,1)\n smoothed_magnitudes = smoothed_magnitudes.reshape(smoothed_magnitudes.size ,1)\n\n return (smoothed_times, smoothed_magnitudes)",
"def smooth(F, x_predicted, P_predicted, x_filtered, P_filtered, z):\n ntimesteps, nmatrices = get_observations_shape(z)\n x_smooth = [None] * ntimesteps\n P_smooth = [None] * ntimesteps\n L = [None] * ntimesteps\n # set mean and covariance at the end to the forward filtered data to start the smoother\n x_smooth[-1] = x_filtered[-1]\n P_smooth[-1] = P_filtered[-1]\n\n # Run the smoother backwards\n for t in reversed(range(ntimesteps - 1)):\n F_cur = pick_nth_step(F, t)\n x_smooth[t], P_smooth[t], L[t] = kalman_smoothing_step(F_cur, x_filtered[t], P_filtered[t], x_predicted[t + 1],\n P_predicted[t + 1], x_smooth[t + 1], P_smooth[t + 1])\n return np.array(x_smooth), np.array(P_smooth), np.array(L)",
"def smooth(x, window_len=11, window='hanning'):\n window_len = min(window_len, len(x) - 1)\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y",
"def smooth(x,window_len=11,window='hanning'): \n \n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n \n\n if window_len<3:\n return x\n \n \n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n \n\n s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n \n y=np.convolve(w/w.sum(),s,mode='valid')\n return y",
"def smooth(x, window_len=10, window='hanning'): # From http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html\n x = np.array(x)\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n window_len = int(x.size/3)\n # raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len < 3:\n return x\n\n if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s=np.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]\n\n if window == 'flat': #moving average\n w = np.ones(window_len,'d')\n else:\n w = getattr(np, window)(window_len)\n y = np.convolve(w/w.sum(), s, mode='same')\n return y[window_len-1:-window_len+1]",
"def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='same')\n return y",
"def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n #return y\n return y[(window_len/2):-(window_len/2)]",
"def smooth(x,window_len=11,window='hanning'):\n\n\t# if x.ndim != 1:\n\t# raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n\t# if x.size < window_len:\n\t# raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\tassert x.ndim==1\n\tassert x.size==window_len\n\n\tif window_len<3:\n\t\treturn x\n\n\tflag = (window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman'])\n\tassert flag==1\n\n\ts=numpy.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n\tif window == 'flat': #moving average\n\t\tw=numpy.ones(window_len,'d')\n\telse:\n\t\tw=eval('numpy.'+window+'(window_len)')\n\n\ty=numpy.convolve(w/w.sum(),s,mode='valid')\n\treturn y",
"def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y",
"def double_exponential_smoothing(self, series, horizon, alpha=0.5, beta=0.5):\n result = [0, series[0]]\n level, trend = series[0], series[1] - series[0]\n for i in range(1, len(series) + horizon - 1):\n if i >= len(series):\n m = i - len(series) + 2\n result.append(level + m * trend)\n else:\n value = series[i]\n last_level, level = level, alpha * value + (1 - alpha) * (level + trend)\n trend = beta * (level - last_level) + (1 - beta) * trend\n result.append(level + trend)\n return result[len(series):len(series) + horizon]",
"def smooth(x, window_len=11, window=\"hanning\"):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if not window in [\"flat\", \"hanning\", \"hamming\", \"bartlett\", \"blackman\"]:\n raise ValueError(\n \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n )\n\n s = np.r_[x[window_len - 1 : 0 : -1], x, x[-2 : -window_len - 1 : -1]]\n # print(len(s))\n if window == \"flat\": # moving average\n w = np.ones(window_len, \"d\")\n else:\n w = eval(\"np.\" + window + \"(window_len)\")\n\n y = np.convolve(w / w.sum(), s, mode=\"valid\")\n return y[(window_len // 2 - 1) : -(window_len // 2 + 1)]",
"def smooth(x,window_len=10,window='hanning'):\n #\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n #\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n #\n if window_len<3:\n return x\n #\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n #\n s=r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=ones(window_len,'d')\n else:\n w=eval(window+'(window_len)')\n #\n y=convolve(w/w.sum(),s,mode='same')\n return y[window_len-1:-window_len+1]",
"def smooth(x, window_len=11, window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len < 3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y",
"def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=ones(window_len,'d')\n else:\n w=eval('numpy.'+window+'(window_len)')\n\n y=numpy.convolve(w/w.sum(),s,mode='same')\n return y[window_len-1:-window_len+1]"
] | [
"0.6874529",
"0.61065215",
"0.60670906",
"0.6025021",
"0.58545476",
"0.5738396",
"0.5616307",
"0.5581053",
"0.5547493",
"0.55354017",
"0.5523303",
"0.55053633",
"0.54205126",
"0.54196185",
"0.54133016",
"0.5408546",
"0.54081184",
"0.539662",
"0.53922886",
"0.53792864",
"0.5348143",
"0.53402656",
"0.533259",
"0.53314954",
"0.53283525",
"0.531001",
"0.53099436",
"0.53081214",
"0.5306247",
"0.5304897"
] | 0.65945506 | 1 |
Register details of an extension's reports | def register_reports(self):
from ckanext.qa import reports
return [reports.openness_report_info] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gReport(self, event):\n \n reports.createReports()",
"def register(ext_id, name, display_admin=False, superuser=False, version=None):\n if ext_id in extensions.keys():\n raise ExtensionExists(\n 'An extension with id %s has already been registered' % ext_id\n )\n\n extensions[ext_id] = {\n 'ext_id': ext_id,\n 'name': name,\n 'version': version,\n 'display_admin': display_admin,\n 'superuser': superuser,\n 'index_url': ext_id + ':index'\n }",
"def addExtension(self, extension_response):\n extension_response.toMessage(self.fields)",
"def discover(self, name=None):\n for obj in iter_entry_points(group=self.group, name=name):\n ext = _Extension(self.type_, obj)\n self.extensions[ext.name] = ext",
"def buildReports(self):\n pass",
"def extensions():\r\n document.add_page_break()\r\n document.add_heading('Extensions', level=1)\r\n extensions = get_qlik_sense.get_extensions()\r\n num_of_extensions = len(extensions)\r\n table = document.add_table(rows=num_of_extensions+1, cols=1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n\r\n for extension in range(num_of_extensions):\r\n row = table.rows[extension+1]\r\n row.cells[0].text = str(extensions[extension])",
"def extensionregistry():\n registry = ExtensionRegistry()\n yield registry",
"def inject_extensions(self, extensions: Dict[str, str]) -> None:\n self.extensions = extensions",
"def report_callback(self, object, report, request):\n ...",
"def _generate_report(self):\n raise NotImplementedError",
"def report():\n pass",
"def ExtendReportData(self, key, value):\n self._dict_report.update({key: value})",
"def extensions():\n\n pass",
"def name(self):\n return 'Report'",
"def _register_application_extension(cls, extension):\n # Should skip registering AppExtensionBase, cannot use isinstance(),\n # referring to app_extension_hooks introduces a dependency cycle\n if extension.__name__ == 'AppExtensionBase':\n return\n\n if getattr(extension, 'init', None):\n extension.init()\n\n cls._set_hooks_for_application(extension)\n\n # Record application extension information\n hooks_info = cls._info.setdefault('AppExtension', [])\n hooks_info.append(extension.__name__) # type: ignore",
"def extensions(self, extensions):\n\n self._extensions = extensions",
"def register_classes():\n AnalyzeExtension.register_class()\n AnalyzeExtension_SG.register_class()",
"def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)",
"def addExtension(self, *args):\n return _libsbml.SBMLExtensionRegistry_addExtension(self, *args)",
"def reports_cli():",
"def register(self, what, obj):\n # print(\"Registering pattern\", name, pattern)\n name = obj.name\n version = obj.version\n enable = obj.enable\n if enable == 'n':\n return\n\n key = Key(name, version)\n self.plugins[what][key] = obj",
"def report(self) -> Any:",
"def add_to_pr_export(self, exp_template):",
"async def on_register_report_full(report, futures=None):\n if futures:\n futures.pop().set_result(True)\n granularity = min(*[rd['sampling_rate']['min_period'] for rd in report['report_descriptions']])\n report_requests = [(rd['r_id'], on_update_report, granularity) for rd in report['report_descriptions'] if report['report_name'] == 'METADATA_TELEMETRY_USAGE']\n return report_requests",
"def extensions(app: CommandGroup):\n\n @argument(\n \"--verbose\", dest=\"verbose\", action=\"store_true\", help_text=\"Verbose output.\"\n )\n @argument(\n \"--out\",\n dest=\"out\",\n default=sys.stdout,\n type=FileType(mode=\"w\"),\n help_text=\"File to output extension report to; default is stdout.\",\n )\n @app.command(name=\"extensions\")\n def _handler(opts) -> Optional[int]:\n \"\"\"\n Report of installed PyApp extensions.\n \"\"\"\n from pyapp.extensions.report import ExtensionReport\n\n return ExtensionReport(opts.verbose, opts.no_color, opts.out).run()",
"def registerExtensions(self, extensions, configs):\n for ext in extensions:\n try:\n if isinstance(ext, util.string_type):\n ext = self.build_extension(ext, configs.get(ext, []))\n if isinstance(ext, Extension):\n ext.extendMarkdown(self, globals())\n elif ext is not None:\n raise TypeError(\n 'Extension \"%s.%s\" must be of type: \"markdown.Extension\"'\n % (ext.__class__.__module__, ext.__class__.__name__))\n except:\n print(str(traceback.format_exc()))\n continue\n\n return self",
"def _report():\n return {\n 'type' : 'class',\n 'name' : 'report',\n 'base' : None,\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n ('date', 'datetime', '0.1', None),\n ('evaluation', 'quality.evaluation', '1.1', None),\n ('evaluator', 'shared.responsible_party', '0.1', None),\n ('measure', 'quality.measure', '1.1', None),\n ],\n 'decodings' : [\n ('date', 'child::gmd:dateTime/gco:DateTime'),\n ('evaluation', 'self::cim:report'),\n ('evaluator', 'child::cim:evaluator'),\n ('measure', 'self::cim:report/cim:measure'),\n ]\n }",
"def register(self):\n raise NotImplementedError",
"def register(self):\n raise NotImplementedError",
"def _add_extensions(self):\n ext_cache_down = 'cache_downloading'\n ext_cache_up = 'cache_uploading'\n cmd_args = self.task_data.get('cmd_args', {})\n if not isinstance(cmd_args, dict):\n cmd_args = {}\n if cmd_args.get('save_raw_pages', False):\n self.required_signals[SIGNAL_SPIDER_OPENED]['wait'] += \\\n EXTENSION_SIGNALS[ext_cache_up]\n if cmd_args.get('load_raw_pages'):\n self.required_signals[SIGNAL_SCRIPT_CLOSED]['wait'] += \\\n EXTENSION_SIGNALS[ext_cache_down]"
] | [
"0.60241216",
"0.5790581",
"0.5619827",
"0.5555276",
"0.5469413",
"0.5383979",
"0.5313093",
"0.5309887",
"0.52991444",
"0.5272507",
"0.5268033",
"0.5226153",
"0.5188942",
"0.51880896",
"0.5184478",
"0.5159821",
"0.51484185",
"0.5145538",
"0.5137522",
"0.512625",
"0.51222086",
"0.5102552",
"0.5102228",
"0.5085026",
"0.5068978",
"0.50479054",
"0.5037855",
"0.50347465",
"0.50347465",
"0.50318104"
] | 0.6834808 | 0 |